Here are the queries I kept around while doing DBA/DA work on projects.
DDL.. privileges.. miscellaneous lookups.. daily checks, and so on..
The first part is miscellaneous lookup/reference queries, the second is what I ran for daily monitoring. (This was just my own scratchpad, so treat it as reference only..)
I used to keep it open as a note in SublimeText and search it with Ctrl+F, so it's just one looong flat list,
but it can be searched the same way, so here it is..
Part 1. Miscellaneous lookup and reference queries
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 00. Lock lookup and kill @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
select t.relname
, l.locktype
, page
, virtualtransaction
, l.pid
, mode
, granted
, s.usename
, s.query
, s.*
from pg_locks l,
pg_stat_all_tables t,
pg_stat_activity s
where l.relation = t.relid
and l.pid = s.pid
-- and relname = :table_name -- ''
and client_addr = '' -- only sessions from my own IP
and application_name = 'psql' -- only my own application name
;
-- check my application_name
show application_name;
-- cancel the running query
select pg_cancel_backend(:PID);
-- if that doesn't work, terminate the backend process
select pg_terminate_backend(pid)
from pg_stat_activity
where pid = :PID;
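Before killing anything, it can help to see who is blocking whom; pg_blocking_pids() (available since PostgreSQL 9.6) gives that directly. A minimal sketch:
-- sessions that are currently blocked, and the PIDs blocking them
select pid
     , pg_blocking_pids(pid) as blocked_by
     , wait_event_type
     , state
     , query
from pg_stat_activity
where cardinality(pg_blocking_pids(pid)) > 0;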
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 01. Table schema lookup (per column) @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
with schema_column_p as (
select c.table_schema
, c.table_name
, obj_description((c.table_schema || '.' || c.table_name)::regclass, 'pg_class') as table_comment
, c.column_name
, pgd.description as column_comment
, (select d.constraint_name as pk
from information_schema.constraint_column_usage d
join information_schema.table_constraints e
on e.constraint_type = 'PRIMARY KEY'
and e.table_catalog = d.table_catalog
and e.table_schema = d.table_schema
and e.table_name = d.table_name
and e.constraint_name = d.constraint_name
and c.table_schema = d.table_schema
and c.table_name = d.table_name
and c.column_name = d.column_name)
, (select tc.constraint_name as fk
from information_schema.table_constraints tc
join information_schema.key_column_usage kcu
on kcu.constraint_name = tc.constraint_name
and tc.constraint_type = 'FOREIGN KEY'
join information_schema.constraint_column_usage ccu
on ccu.constraint_name = tc.constraint_name
where 1=1
and c.table_schema = tc.table_schema
and c.table_name = tc.table_name
and c.column_name = kcu.column_name)
, column_default
, is_nullable
, case when c.udt_name = 'varchar'
then concat('varchar(', c.character_maximum_length, ')')
when c.udt_name = 'numeric'
then concat('numeric(', c.numeric_precision, ',', c.numeric_scale, ')')
else c.udt_name
end as "type"
, c.ordinal_position
, data_type
, character_maximum_length as data_length
from information_schema.columns c
left join pg_catalog.pg_statio_all_tables as st
on c.table_schema = st.schemaname
and c.table_name = st.relname
left join pg_catalog.pg_description pgd
on pgd.objoid = st.relid
and pgd.objsubid = c.ordinal_position
where 1=1
-- and c.table_schema in ('') -- add schemas here
and c.table_schema not like 'pg%'
)
select t01.*
from schema_column_p t01
where 1=1
order by table_schema, table_name, ordinal_position
;
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 01-1~. Other schema lookups @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
-- 01-1. Object counts
-- number of tables, views, etc. per schema
SELECT n.nspname as schema_name
, CASE c.relkind
WHEN 'r' THEN 'table'
WHEN 'v' THEN 'view'
WHEN 'i' THEN 'index'
WHEN 'S' THEN 'sequence'
WHEN 's' THEN 'special'
END as object_type
, count(1) as object_count
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','i','S','s')
--AND n.nspname IN ('schema_name#1', 'schema_name#2', ... )
GROUP BY n.nspname, CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' END
ORDER BY n.nspname, CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' END;
-- number of functions / procedures per schema
SELECT n.nspname
, CASE p.prokind WHEN 'f' THEN 'normal function' WHEN 'p' THEN 'procedure' WHEN 'a' THEN 'aggregate function' END
, count(*)
FROM pg_catalog.pg_namespace n
JOIN pg_catalog.pg_proc p
ON p.pronamespace = n.oid
--WHERE n.nspname IN ('schema_name#1', 'schema_name#2', ... )
GROUP BY n.nspname, CASE p.prokind WHEN 'f' THEN 'normal function' WHEN 'p' THEN 'procedure' WHEN 'a' THEN 'aggregate function' END
ORDER BY n.nspname, CASE p.prokind WHEN 'f' THEN 'normal function' WHEN 'p' THEN 'procedure' WHEN 'a' THEN 'aggregate function' END;
-- list unlogged tables
select relname, reltype::regtype-- ,*
from pg_class
where relpersistence = 'u' -- p = permanent, u = unlogged, t = temporary table/sequence
and reltype <> 0;
-- 01-2. Index lookup
SELECT *
FROM pg_indexes
WHERE tablename = ''
;
-- index creation DDL generator
SELECT schemaname, tablename, indexname, tablespace, 'CREATE ' || :UNIQUE || ' INDEX idx_' || tablename || '_0' || (select COUNT(*) FROM pg_indexes WHERE tablename = T.tablename)
|| ' ON ' || schemaname || '.' || tablename || ' USING btree ' || :column || ';'
FROM pg_indexes T
WHERE tablename = :tablename
;
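What the generated DDL comes out like (hypothetical names; this assumes the :column placeholder is supplied with its parentheses, e.g. '(col1, col2)'):
CREATE UNIQUE INDEX idx_my_table_03 ON my_schema.my_table USING btree (col1, col2);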
-- find unused indexes https://blog.gaerae.com/2015/09/postgresql-index.html
select schemaname AS schema_name
, relname AS table_name
, indexrelname AS index_name
, pg_size_pretty(pg_relation_size(indexrelid::regclass)) AS index_size
, idx_scan
, idx_tup_read
, idx_tup_fetch
from pg_stat_user_indexes t
where exists (select 1 -- schemaname, tablename, count(*)
from pg_indexes s
where t.schemaname = s.schemaname
and t.relname = s.tablename
group by schemaname, tablename
having count(*) > 5) -- only tables with more than 5 indexes
order by idx_scan asc, table_name, index_size, index_name;
-- index size
select indexrelid::regclass, indrelid::regclass, pg_size_pretty(pg_total_relation_size(indexrelid)) AS index_size
from pg_index
WHERE indexrelid::regclass::varchar = 'schema.index'
;
-- indisvalid = false means the index is unusable and needs a rebuild.
select *
from pg_index
where indisvalid = false;
-- index rebuild (clears bloat too) https://www.postgresql.org/docs/current/sql-reindex.html
-- CONCURRENTLY takes waaay too long, though.. hmm
REINDEX INDEX CONCURRENTLY index_name;
-- An index name ending in _ccnew is a temporary index created during a CONCURRENTLY operation. If one is still around after the reindex has finished, running the reindex again is recommended.
psql=# \d tablename
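A quick way to list leftover invalid *_ccnew indexes across the database (a sketch; adjust the name pattern if needed):
select n.nspname as schema_name
     , c.relname as index_name
     , i.indrelid::regclass as table_name
from pg_index i
join pg_class c on c.oid = i.indexrelid
join pg_namespace n on n.oid = c.relnamespace
where i.indisvalid = false
  and c.relname like '%_ccnew%';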
-- check index rebuild progress
-- phase descriptions: https://www.postgresql.org/docs/current/progress-reporting.html#ANALYZE-PHASES
select * from pg_stat_progress_create_index ;
-- 01-3. Miscellaneous lookups
-- indexes
select *
from pg_indexes
;
-- triggers
select *
from information_schema.triggers
;
-- tables
select *
from pg_tables s
;
-- sequences
select *
from information_schema.sequences
left join information_schema.role_usage_grants
on sequence_name = object_name
;
-- partition tables
select *
from pg_class pc
join pg_inherits pi
on pc.oid = pi.inhrelid
join pg_partitioned_table ppt
on ppt.partrelid = pi.inhparent
;
select inhparent::regclass as parent_table
, inhrelid::regclass as child_table
from pg_inherits
order by parent_table, child_table;
-- partitioned parent tables only, excluding indexes
select i.inhparent::regclass -- c.relname as table_name
from pg_class c
join pg_inherits i
on c.oid = i.inhrelid
where 1=1
and c.relkind = 'r' -- 'r' = ordinary table
and not exists (select 1
from pg_index idx
where idx.indexrelid = c.oid )
group by i.inhparent::regclass
order by i.inhparent::regclass::varchar
;
-- partitioning reference: https://velog.io/@dailylifecoding/postgresql-table-partitioning
-- size check
select
pg_size_pretty(pg_relation_size(oid)) as pretty_size,
relname,
case
when relkind = 'r' then 'ordinary table'
when relkind = 'i' then 'index'
when relkind = 'S' then 'sequence'
when relkind = 'v' then 'view'
when relkind = 'm' then 'materialized view'
when relkind = 'c' then 'composite type'
when relkind = 'f' then 'foreign table'
when relkind = 'p' then 'partitioned table'
when relkind = 'I' then 'partitioned index'
when relkind = 't' then 'TOAST table'
when relkind = 's' then 'special'
end reltype_nm,
relispartition -- whether this relation is a partition
from pg_class
where relname like '%%'
order by pg_relation_size(oid) desc;
-- 01-3-1. Privilege lookups
-- check table grants
SELECT grantee
, grantor
, table_schema
, table_name
, pg_catalog.has_schema_privilege(grantee, table_schema, 'CREATE') AS "create"
, pg_catalog.has_schema_privilege(grantee, table_schema, 'USAGE') AS "usage"
, privilege_type -- , t.*-- grantee, privilege_type
FROM information_schema.role_table_grants t
WHERE table_schema in ('')
order by grantee, table_schema, table_name, privilege_type
;
-- functions and their privileges
select *
from information_schema.routine_privileges
;
-- owners of functions
SELECT proname AS function_name, pg_roles.rolname AS owner
FROM pg_proc
JOIN pg_roles
ON pg_proc.proowner = pg_roles.oid
where proname like 'fn_%';
-- sequence privileges
select sequence_schema
, sequence_name
, privilege_type
, grantee
, is_grantable
, pg_catalog.has_schema_privilege(grantee, sequence_schema, 'USAGE') AS "schema_usage"
, 'select '''||sequence_schema||'.'||sequence_name||''', last_value, log_cnt, is_called from '||sequence_schema||'.'||sequence_name||' union all' -- generates a query to inspect each sequence's current value
from information_schema.sequences
left join information_schema.role_usage_grants
on sequence_name = object_name
where sequence_schema = ''
order by sequence_schema, is_grantable, sequence_name, grantee
;
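The last column above just builds a UNION ALL statement; the generated lines look roughly like this (hypothetical schema/sequence names) — drop the trailing 'union all' from the last line before running:
select 'my_schema.order_seq', last_value, log_cnt, is_called from my_schema.order_seq union all
select 'my_schema.user_seq', last_value, log_cnt, is_called from my_schema.user_seq;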
-- privilege comparison
select *
from information_schema.role_table_grants s
;
-- schema privileges per role
with "schemas"("schema_name") as (
select n.nspname
from pg_catalog.pg_namespace n
where n.nspname !~ '^pg_'
and n.nspname <> 'information_schema'
)
select schema_name
, s.usename as role
, pg_catalog.has_schema_privilege(s.usename, schema_name, 'CREATE') AS "create"
, pg_catalog.has_schema_privilege(s.usename, schema_name, 'USAGE') AS "usage"
from schemas t
join pg_user s
on 1=1
where schema_name = ''
;
-- role memberships per user
select grantor::regrole::varchar(100) as grantor
, member::regrole::varchar(100) as grantee
, string_agg(roleid::regrole || (case when admin_option = true then '(★)' else '' end),', ' order by roleid::regrole,admin_option) as grant_roles -- admin_option: is_grantable
from pg_catalog.pg_auth_members pam
group by grantor, member
;
-- detailed planner statistics
select * from pg_stats;
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 03. DDL reference @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
alter table :schema.:table_name add column :column_name :type();
comment on column :schema.:table_name.:column_name is 'column comment';
-- rename a column
ALTER TABLE :schema.:table_name RENAME COLUMN :column_name TO :new_column_name;
-- create sequence
CREATE SEQUENCE sequence_name
INCREMENT BY 1
MINVALUE 1
MAXVALUE 9223372036854775807
START 1
CACHE 1
NO CYCLE;
commit;
-- use the sequence as a column default
ALTER TABLE schema.table_name ALTER COLUMN column_name SET DEFAULT nextval('schema.sequence_name');
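Optionally, the sequence can be tied to that column so it is dropped together with it (a sketch with placeholder names):
ALTER SEQUENCE schema.sequence_name OWNED BY schema.table_name.column_name;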
-- change the sequence start value
ALTER SEQUENCE sequence_name START 1;
-- use RESTART to change the current value
ALTER SEQUENCE sequence_name RESTART 1;
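setval() also works for this; the third argument (is_called) decides whether the next nextval() returns the given value or the one after it (sketch with a placeholder name):
SELECT setval('schema.sequence_name', 100, true); -- next nextval() returns 101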
-- drop pk
ALTER TABLE schema.table_name DROP CONSTRAINT constraint_name;
-- create pk
ALTER TABLE schema.table_name ADD CONSTRAINT constraint_name PRIMARY KEY (column...);
-- rename pk
ALTER INDEX schema.constraint_name RENAME TO new_constraint_name;
-- create index
CREATE INDEX constraint_name ON schema.table_name USING btree (column...);
-- copy a table.
-- copies indexes and all other properties as well
create table copy_target_table (like copy_source_table including all)
;
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 04. Privileges @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
-- create a role
CREATE ROLE rolename WITH PASSWORD '';
1. Grant CONNECT to the database:
GRANT CONNECT ON DATABASE database_name TO username;
ALTER ROLE username LOGIN;
2. Grant USAGE on schema:
GRANT USAGE ON SCHEMA schema_name TO username;
3. Grant on all tables for DML statements: SELECT, INSERT, UPDATE, DELETE:
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA schema_name TO username;
4. Grant all privileges on all tables in the schema:
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA schema_name TO username;
5. Grant all privileges on all sequences in the schema:
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA schema_name TO username;
-- USAGE, SELECT
GRANT USAGE, SELECT ON SEQUENCE sequence_name TO username;
5-1. Grant all privileges on all FUNCTIONS in the schema:
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA schema_name TO username;
-- EXECUTE
GRANT EXECUTE ON FUNCTION function_name TO username;
6. Grant all privileges on the database:
GRANT ALL PRIVILEGES ON DATABASE database_name TO username;
7. Grant permission to create database:
ALTER USER username CREATEDB;
8. Make a user superuser:
ALTER USER myuser WITH SUPERUSER;
9. Remove superuser status:
ALTER USER username WITH NOSUPERUSER;
The statements above only affect currently existing tables. To cover newly created tables as well, use ALTER DEFAULT PRIVILEGES. For example:
ALTER DEFAULT PRIVILEGES FOR USER username IN SCHEMA schema_name GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO username;
GRANT ALL PRIVILEGES ON ALL FUNCTIONS [IN SCHEMA [schema]] TO username;
GRANT ALL PRIVILEGES ON ALL SEQUENCES [IN SCHEMA [schema]] TO username;
GRANT ALL PRIVILEGES ON ALL PROCEDURES [IN SCHEMA [schema]] TO username;
GRANT SELECT, INSERT, UPDATE , DELETE ON ALL TABLES [IN SCHEMA [schema]] TO username;
-- set the search_path
-- run this while connected as the account that will use it
-- ALTER ROLE username SET search_path TO schema, schema, ..., "$user";
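A concrete sketch with hypothetical role/schema names:
ALTER ROLE testuser SET search_path TO test_schema, public, "$user";
-- verify in a new session as that role
SHOW search_path;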
-- grant privileges on tables that will be created in the future as well
ALTER DEFAULT PRIVILEGES [IN SCHEMA [schema]] GRANT [ALL PRIVILEGES] ON TABLES TO [role];
ALTER DEFAULT PRIVILEGES [IN SCHEMA [schema]] GRANT ALL PRIVILEGES ON TABLES TO username;
ALTER DEFAULT PRIVILEGES [IN SCHEMA [schema]] GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO username;
-- owner 변경
ALTER FUNCTION function_name(varchar, varchar, int4) OWNER TO username;
ALTER PROCEDURE procedure_name() OWNER TO username;
-- 테이블 생성권한
grant create on schema test_schema to testuser;
Part 2. Daily monitoring queries
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-- 0. cache hit rate
with all_tables as (
select *
from (select 'all' as schemaname
, 'all'::text as table_name
, sum( (coalesce(heap_blks_read,0) + coalesce(idx_blks_read,0) + coalesce(toast_blks_read,0) + coalesce(tidx_blks_read,0)) ) as from_disk
, sum( (coalesce(heap_blks_hit,0) + coalesce(idx_blks_hit,0) + coalesce(toast_blks_hit,0) + coalesce(tidx_blks_hit,0)) ) as from_cache
from pg_statio_all_tables --> change to pg_statio_user_tables if you want to check only user tables (excluding postgres's own tables)
) a
where (from_disk + from_cache) > 0 -- discard tables without hits
), tables as (
select *
from ( select schemaname
, relname as table_name
, ( (coalesce(heap_blks_read,0) + coalesce(idx_blks_read,0) + coalesce(toast_blks_read,0) + coalesce(tidx_blks_read,0)) ) as from_disk
, ( (coalesce(heap_blks_hit,0) + coalesce(idx_blks_hit,0) + coalesce(toast_blks_hit,0) + coalesce(tidx_blks_hit,0)) ) as from_cache
from pg_statio_all_tables --> change to pg_statio_user_tables if you want to check only user tables (excluding postgres's own tables)
) a
where (from_disk + from_cache) > 0 -- discard tables without hits
)
select schemaname
, table_name as "table name"
, from_disk as "disk hits"
, from_cache as "cache hits"
, round((from_disk::numeric / (from_disk + from_cache)::numeric)*100.0,2) as "% disk hits"
, round((from_cache::numeric / (from_disk + from_cache)::numeric)*100.0,2) as "% cache hits"
, (from_disk + from_cache) as "total hits"
from (select * from all_tables union all select * from tables) a
order by (case when table_name = 'all' then 0 else 1 end), from_disk desc, schemaname, table_name
;
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-- 1. Find unused indexes
-- https://blog.gaerae.com/2015/09/postgresql-index.html
select schemaname AS schema_name
, relname AS table_name
, indexrelname AS index_name
, pg_size_pretty(pg_relation_size(indexrelid::regclass)) AS index_size
, idx_scan
, idx_tup_read
, idx_tup_fetch
from pg_stat_all_indexes t
where schemaname not like 'pg_%'
-- where exists (select 1 -- schemaname, tablename, count(*)
-- from pg_indexes s
-- where t.schemaname = s.schemaname
-- and t.relname = s.tablename
-- group by schemaname, tablename
-- having count(*) > 5) -- only tables with more than 5 indexes
order by idx_scan asc, table_name, index_size, index_name;
-- invalid (unusable) indexes
select *
from pg_index
where indisvalid = false;
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-- 2-1. Table bloat check
-- https://github.com/pgexperts/pgx_scripts/blob/master/bloat/index_bloat_check.sql
WITH constants AS (
-- define some constants for sizes of things
-- for reference down the query and easy maintenance
SELECT current_setting('block_size')::numeric AS bs, 23 AS hdr, 8 AS ma
),
no_stats AS (
-- screen out tables that have attributes
-- without stats, such as JSON
SELECT table_schema, table_name,
n_live_tup::numeric as est_rows,
pg_table_size(relid)::numeric as table_size
FROM information_schema.columns
JOIN pg_stat_all_tables as psut
ON table_schema = psut.schemaname
AND table_name = psut.relname
LEFT OUTER JOIN pg_stats
ON table_schema = pg_stats.schemaname
AND table_name = pg_stats.tablename
AND column_name = attname
WHERE attname IS NULL
AND table_schema NOT IN ('pg_catalog', 'information_schema')
-- and tablename = :tablename
GROUP BY table_schema, table_name, relid, n_live_tup
),
null_headers AS (
-- calculate null header sizes,
-- omitting tables which don't have complete stats
-- and attributes which aren't visible
SELECT
hdr+1+(sum(case when null_frac <> 0 THEN 1 else 0 END)/8) as nullhdr,
SUM((1-null_frac)*avg_width) as datawidth,
MAX(null_frac) as maxfracsum,
schemaname,
tablename,
hdr, ma, bs
FROM pg_stats CROSS JOIN constants
LEFT OUTER JOIN no_stats
ON schemaname = no_stats.table_schema
AND tablename = no_stats.table_name
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
AND no_stats.table_name IS NULL
AND EXISTS ( SELECT 1
FROM information_schema.columns
WHERE schemaname = columns.table_schema
AND tablename = columns.table_name )
-- and tablename = :tablename
GROUP BY schemaname, tablename, hdr, ma, bs
),
data_headers AS (
-- estimate header and row size
SELECT
ma, bs, hdr, schemaname, tablename,
(datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
(maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
FROM null_headers
),
table_estimates AS (
-- make estimates of how large the table should be
-- based on row and page size
SELECT schemaname, tablename, bs,
reltuples::numeric as est_rows, relpages * bs as table_bytes,
CEIL((reltuples*
(datahdr + nullhdr2 + 4 + ma -
(CASE WHEN datahdr%ma=0
THEN ma ELSE datahdr%ma END)
)/(bs-20))) * bs AS expected_bytes,
reltoastrelid
FROM data_headers
JOIN pg_class ON tablename = relname
JOIN pg_namespace ON relnamespace = pg_namespace.oid
AND schemaname = nspname
WHERE pg_class.relkind = 'r'
),
estimates_with_toast AS (
-- add in estimated TOAST table sizes;
-- estimate based on 4 toast tuples per page because we don't have
-- anything better. also append the no_stats tables
SELECT schemaname, tablename,
TRUE as can_estimate,
est_rows,
table_bytes + ( coalesce(toast.relpages, 0) * bs ) as table_bytes,
expected_bytes + ( ceil( coalesce(toast.reltuples, 0) / 4 ) * bs ) as expected_bytes
FROM table_estimates LEFT OUTER JOIN pg_class as toast
ON table_estimates.reltoastrelid = toast.oid
AND toast.relkind = 't'
),
table_estimates_plus AS (
-- add some extra metadata to the table data,
-- and calculations to be reused,
-- including whether we can't estimate it
-- or whether we think it might be compressed
SELECT current_database() as databasename,
schemaname, tablename, can_estimate,
est_rows,
CASE WHEN table_bytes > 0
THEN table_bytes::NUMERIC
ELSE NULL::NUMERIC END
AS table_bytes,
CASE WHEN expected_bytes > 0
THEN expected_bytes::NUMERIC
ELSE NULL::NUMERIC END
AS expected_bytes,
CASE WHEN expected_bytes > 0 AND table_bytes > 0
AND expected_bytes <= table_bytes
THEN (table_bytes - expected_bytes)::NUMERIC
ELSE 0::NUMERIC END AS bloat_bytes
FROM estimates_with_toast
UNION ALL
SELECT current_database() as databasename,
table_schema, table_name, FALSE,
est_rows, table_size,
NULL::NUMERIC, NULL::NUMERIC
FROM no_stats
),
bloat_data AS (
-- do final math calculations and formatting
select current_database() as databasename,
schemaname, tablename, can_estimate,
table_bytes, round(table_bytes/(1024^2)::NUMERIC,3) as table_mb,
expected_bytes, round(expected_bytes/(1024^2)::NUMERIC,3) as expected_mb,
round(bloat_bytes*100/table_bytes) as pct_bloat,
round(bloat_bytes/(1024::NUMERIC^2),2) as mb_bloat,
table_bytes, expected_bytes, est_rows
FROM table_estimates_plus
)
-- filter output for bloated tables
SELECT databasename, schemaname, tablename,
can_estimate,
est_rows,
pct_bloat /* percent */, mb_bloat /* bloat size */,
table_mb
FROM bloat_data
-- this where clause defines which tables actually appear
-- in the bloat chart
-- example below filters for tables which are either 50%
-- bloated and more than 20mb in size, or more than 25%
-- bloated and more than 1GB in size
WHERE ( pct_bloat >= 50 AND mb_bloat >= 20 )
OR ( pct_bloat >= 25 AND mb_bloat >= 1000 )
ORDER BY pct_bloat DESC;
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-- 2-2. Index bloat check
-- btree index stats query
-- estimates bloat for btree indexes
WITH btree_index_atts AS (
SELECT nspname,
indexclass.relname as index_name,
indexclass.reltuples,
indexclass.relpages,
indrelid, indexrelid,
indexclass.relam,
tableclass.relname as tablename,
regexp_split_to_table(indkey::text, ' ')::smallint AS attnum,
indexrelid as index_oid
FROM pg_index
JOIN pg_class AS indexclass ON pg_index.indexrelid = indexclass.oid
JOIN pg_class AS tableclass ON pg_index.indrelid = tableclass.oid
JOIN pg_namespace ON pg_namespace.oid = indexclass.relnamespace
JOIN pg_am ON indexclass.relam = pg_am.oid
WHERE pg_am.amname = 'btree' and indexclass.relpages > 0
AND nspname NOT IN ('pg_catalog','information_schema')
),
index_item_sizes AS (
SELECT
ind_atts.nspname, ind_atts.index_name,
ind_atts.reltuples, ind_atts.relpages, ind_atts.relam,
indrelid AS table_oid, index_oid,
current_setting('block_size')::numeric AS bs,
8 AS maxalign,
24 AS pagehdr,
CASE WHEN max(coalesce(pg_stats.null_frac,0)) = 0
THEN 2
ELSE 6
END AS index_tuple_hdr,
sum( (1-coalesce(pg_stats.null_frac, 0)) * coalesce(pg_stats.avg_width, 1024) ) AS nulldatawidth
FROM pg_attribute
JOIN btree_index_atts AS ind_atts ON pg_attribute.attrelid = ind_atts.indexrelid AND pg_attribute.attnum = ind_atts.attnum
JOIN pg_stats ON pg_stats.schemaname = ind_atts.nspname
-- stats for regular index columns
AND ( (pg_stats.tablename = ind_atts.tablename AND pg_stats.attname = pg_catalog.pg_get_indexdef(pg_attribute.attrelid, pg_attribute.attnum, TRUE))
-- stats for functional indexes
OR (pg_stats.tablename = ind_atts.index_name AND pg_stats.attname = pg_attribute.attname))
WHERE pg_attribute.attnum > 0
GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9
),
index_aligned_est AS (
SELECT maxalign, bs, nspname, index_name, reltuples,
relpages, relam, table_oid, index_oid,
coalesce (
ceil (
reltuples * ( 6
+ maxalign
- CASE
WHEN index_tuple_hdr%maxalign = 0 THEN maxalign
ELSE index_tuple_hdr%maxalign
END
+ nulldatawidth
+ maxalign
- CASE /* Add padding to the data to align on MAXALIGN */
WHEN nulldatawidth::integer%maxalign = 0 THEN maxalign
ELSE nulldatawidth::integer%maxalign
END
)::numeric
/ ( bs - pagehdr::NUMERIC )
+1 )
, 0 )
as expected
FROM index_item_sizes
),
raw_bloat AS (
SELECT current_database() as dbname, nspname, pg_class.relname AS table_name, index_name,
bs*(index_aligned_est.relpages)::bigint AS totalbytes, expected,
CASE
WHEN index_aligned_est.relpages <= expected
THEN 0
ELSE bs*(index_aligned_est.relpages-expected)::bigint
END AS wastedbytes,
CASE
WHEN index_aligned_est.relpages <= expected
THEN 0
ELSE bs*(index_aligned_est.relpages-expected)::bigint * 100 / (bs*(index_aligned_est.relpages)::bigint)
END AS realbloat,
pg_relation_size(index_aligned_est.table_oid) as table_bytes,
stat.idx_scan as index_scans
FROM index_aligned_est
JOIN pg_class ON pg_class.oid=index_aligned_est.table_oid
JOIN pg_stat_all_indexes AS stat ON index_aligned_est.index_oid = stat.indexrelid
),
format_bloat AS (
SELECT dbname as database_name, nspname as schema_name, table_name, index_name,
round(realbloat) as bloat_pct, round(wastedbytes/(1024^2)::NUMERIC) as bloat_mb,
round(totalbytes/(1024^2)::NUMERIC,3) as index_mb,
round(table_bytes/(1024^2)::NUMERIC,3) as table_mb,
index_scans
FROM raw_bloat
)
-- final query outputting the bloated indexes
-- change the where and order by to change
-- what shows up as bloated
SELECT *
FROM format_bloat
WHERE ( bloat_pct > 50 and bloat_mb > 10 )
ORDER BY bloat_pct DESC;
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-- 3. Total size per schema [schema name / disk space / total DB size / percent used]
SELECT schema_name
, pg_size_pretty(sum(table_size)::bigint) as "disk space"
, pg_size_pretty(pg_database_size(current_database())) as "DB_size"
, (sum(table_size) / pg_database_size(current_database())) * 100 as "percent (%)"
FROM (SELECT pg_catalog.pg_namespace.nspname as schema_name
, pg_relation_size(pg_catalog.pg_class.oid) as table_size
FROM pg_catalog.pg_class
JOIN pg_catalog.pg_namespace
ON relnamespace = pg_catalog.pg_namespace.oid) t
GROUP BY schema_name
ORDER BY schema_name;
-- per-table sizes within a specific schema
SELECT nspname || '.' || relname AS "relation"
, pg_size_pretty(pg_table_size(C.oid)) AS "size_table(sum all relations)" -- sum of all the return values of pg_relation_size
, pg_size_pretty(pg_relation_size(C.oid)) AS "size_relation"
, pg_size_pretty(pg_indexes_size(C.oid)) AS "size_index"
, pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size(table + indexes)" -- sum of pg_table_size and pg_indexes_size
FROM pg_class C
LEFT JOIN pg_namespace N
ON (N.oid = C.relnamespace)
WHERE nspname IN (:schema_name)
and C.relkind <> 'i'
AND nspname !~ '^pg_toast'
ORDER BY pg_relation_size(C.oid) desc;
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-- 4. Current activity
-- long-running queries
SELECT current_timestamp - query_start AS runtime
, state, datname, usename, query
FROM pg_stat_activity
WHERE 1=1
-- AND state = 'active'
AND current_timestamp - query_start > '5 min'
ORDER BY 1 DESC;
/* top 50 queries by mean execution time https://www.postgresql.org/docs/16/pgstatstatements.html */
select b.usename
, c.datname
, a.query as query
, a.calls as "call count"
, 100.0 * shared_blks_hit / nullif(shared_blks_hit + shared_blks_read, 0) AS hit_percent
, case when to_char(a.mean_exec_time * interval '1 millisecond', 'hh24')::integer <> 00 then to_char(a.mean_exec_time * interval '1 millisecond', 'hh24') || 'h ' else '' end
|| case when to_char(a.mean_exec_time * interval '1 millisecond', 'mi')::integer <> 00 then to_char(a.mean_exec_time * interval '1 millisecond', 'mi') || 'm ' else '' end
|| to_char(a.mean_exec_time * interval '1 millisecond', 'sss') as "avg exec time"
, case when to_char(a.total_exec_time * interval '1 millisecond', 'hh24')::integer <> 00 then to_char(a.total_exec_time * interval '1 millisecond', 'hh24') || 'h ' else '' end
|| case when to_char(a.total_exec_time * interval '1 millisecond', 'mi')::integer <> 00 then to_char(a.total_exec_time * interval '1 millisecond', 'mi') || 'm ' else '' end
|| to_char(a.total_exec_time * interval '1 millisecond', 'sss') as "total exec time"
, case when to_char(a.min_exec_time * interval '1 millisecond', 'hh24')::integer <> 00 then to_char(a.min_exec_time * interval '1 millisecond', 'hh24') || 'h ' else '' end
|| case when to_char(a.min_exec_time * interval '1 millisecond', 'mi')::integer <> 00 then to_char(a.min_exec_time * interval '1 millisecond', 'mi') || 'm ' else '' end
|| to_char(a.min_exec_time * interval '1 millisecond', 'sss') as "min exec time"
, case when to_char(a.max_exec_time * interval '1 millisecond', 'hh24')::integer <> 00 then to_char(a.max_exec_time * interval '1 millisecond', 'hh24') || 'h ' else '' end
|| case when to_char(a.max_exec_time * interval '1 millisecond', 'mi')::integer <> 00 then to_char(a.max_exec_time * interval '1 millisecond', 'mi') || 'm ' else '' end
|| to_char(a.max_exec_time * interval '1 millisecond', 'sss') as "max exec time"
, case when to_char(a.stddev_exec_time * interval '1 millisecond', 'hh24')::integer <> 00 then to_char(a.stddev_exec_time * interval '1 millisecond', 'hh24') || 'h ' else '' end
|| case when to_char(a.stddev_exec_time * interval '1 millisecond', 'mi')::integer <> 00 then to_char(a.stddev_exec_time * interval '1 millisecond', 'mi') || 'm ' else '' end
|| to_char(a.stddev_exec_time * interval '1 millisecond', 'sss') as "exec time stddev"
, a.rows as "total rows affected"
, a.userid
, a.queryid
, a.dbid
from pg_stat_statements a
join pg_catalog.pg_user b
on a.userid = b.usesysid
join pg_catalog.pg_stat_database c
on a.dbid = c.datid
order by a.mean_exec_time desc
limit 50
;
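This needs the pg_stat_statements extension; a minimal setup sketch (changing the preload parameter requires a restart):
-- in postgresql.conf: shared_preload_libraries = 'pg_stat_statements'
show shared_preload_libraries;
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
-- reset the accumulated statistics if needed
select pg_stat_statements_reset();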
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-- buffer cache usage. Requires the pg_buffercache extension.
SELECT
c.relname,
sum(usagecount) AS usage_count,
/* count(*) is the number of buffers in use. Each buffer is 8kB (8kB is the page size,
   a.k.a. the block size configured at compile time; confirm in psql with `show block_size`).
   The count is cast to float to introduce a float into calculations that are otherwise all int,
   which would produce a result rounded to the nearest int.
*/
-- cache_% shows the portion of the cache that this entity occupies
((count(*)::float / pg_settings.setting::int) * 100)::numeric(3, 1) AS "cache_%",
-- entity_% shows the portion of this entity that's in cache
-- pg_table_size() is in bytes, multiply buffer count by (8 * 1024) so units match
(((count(*)::float * 8192) / pg_table_size(c.relname::text)) * 100)::numeric(4,1)
AS "entity_%",
(count(*)::float * 8 / (1024 * 1024))::numeric(20, 1) AS Gb,
(count(*)::float * 8 / 1024 )::numeric(20, 1) AS Mb
FROM
pg_buffercache b
CROSS JOIN pg_settings
INNER JOIN pg_class c
ON b.relfilenode = pg_relation_filenode(c.oid)
AND b.reldatabase IN (0, (SELECT oid FROM pg_database WHERE datname = current_database()))
WHERE
pg_settings.name = 'shared_buffers'
-- If this is run on a system where shared_buffers is expressed in something other than 8kB
-- blocks, it will calculate incorrect kB/Mb/Gb values. Asserting the value of the unit here
-- ensures no results at all will be returned in that case.
AND pg_settings.unit = '8kB'
GROUP BY
c.relname, pg_settings.setting::int
HAVING
-- Only include entries that occupy at least 0.1% of the cache
((count(*)::float / pg_settings.setting::int) * 100) >= 0.1
ORDER BY 6 desc;
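If the extension isn't installed yet, this is the usual setup:
CREATE EXTENSION IF NOT EXISTS pg_buffercache;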
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-- vacuum
-- VACUUM (FULL) schema.tablename;
-- tables that have crossed the autovacuum thresholds
WITH vbt AS (
SELECT
setting AS autovacuum_vacuum_threshold
FROM
pg_settings
WHERE
name = 'autovacuum_vacuum_threshold'
),
vsf AS (
SELECT
setting AS autovacuum_vacuum_scale_factor
FROM
pg_settings
WHERE
name = 'autovacuum_vacuum_scale_factor'
),
fma AS (
SELECT
setting AS autovacuum_freeze_max_age
FROM
pg_settings
WHERE
name = 'autovacuum_freeze_max_age'
),
sto AS (
select
opt_oid,
split_part(setting, '=', 1) as param,
split_part(setting, '=', 2) as value
from
(
select
oid opt_oid,
unnest(reloptions) setting
from
pg_class
) opt
)
SELECT
'"' || ns.nspname || '"."' || c.relname || '"' as relation,
pg_size_pretty(
pg_table_size(c.oid)
) as table_size,
age(relfrozenxid) as xid_age,
coalesce(
cfma.value :: float, autovacuum_freeze_max_age :: float
) autovacuum_freeze_max_age,
(
coalesce(
cvbt.value :: float, autovacuum_vacuum_threshold :: float
) + coalesce(
cvsf.value :: float, autovacuum_vacuum_scale_factor :: float
) * c.reltuples
) as autovacuum_vacuum_tuples,
n_dead_tup as dead_tuples
FROM
pg_class c
join pg_namespace ns on ns.oid = c.relnamespace
join pg_stat_all_tables stat on stat.relid = c.oid
join vbt on (1 = 1)
join vsf on (1 = 1)
join fma on (1 = 1)
left join sto cvbt on cvbt.param = 'autovacuum_vacuum_threshold'
and c.oid = cvbt.opt_oid
left join sto cvsf on cvsf.param = 'autovacuum_vacuum_scale_factor'
and c.oid = cvsf.opt_oid
left join sto cfma on cfma.param = 'autovacuum_freeze_max_age'
and c.oid = cfma.opt_oid
WHERE
c.relkind = 'r'
and nspname <> 'pg_catalog'
and (
age(relfrozenxid) >= coalesce(
cfma.value :: float, autovacuum_freeze_max_age :: float
)
or coalesce(
cvbt.value :: float, autovacuum_vacuum_threshold :: float
) + coalesce(
cvsf.value :: float, autovacuum_vacuum_scale_factor :: float
) * c.reltuples <= n_dead_tup -- or 1 = 1
) ORDER BY age(relfrozenxid) DESC LIMIT 50;
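To watch (auto)vacuum workers that are currently running, pg_stat_progress_vacuum (PostgreSQL 9.6+) can be checked; a minimal sketch:
select p.pid
     , p.datname
     , p.relid::regclass as table_name
     , p.phase
     , p.heap_blks_total, p.heap_blks_scanned, p.heap_blks_vacuumed
from pg_stat_progress_vacuum p;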