Checking table/index bloat
Table/index bloat means, literally, that the physical size of a table or index is abnormally inflated. It is one of the main things to watch in PostgreSQL: bloat incurs unnecessary I/O and wastes storage, and it typically occurs when autovacuum is turned off or its related parameters are not tuned appropriately. Below are SQL queries that check how bloated your indexes and tables are.
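Before running them, it is worth confirming that autovacuum is actually enabled and seeing which tables are accumulating dead tuples. A minimal sketch against the standard pg_settings and pg_stat_user_tables views (no assumptions beyond a stock PostgreSQL install):

-- Autovacuum-related settings; 'autovacuum' itself should normally be 'on'.
SELECT name, setting, unit
FROM pg_settings
WHERE name LIKE 'autovacuum%'
ORDER BY name;

-- Tables with the most dead tuples, and when they were last (auto)vacuumed.
SELECT schemaname, relname, n_dead_tup, last_vacuum, last_autovacuum
FROM pg_stat_user_tables
ORDER BY n_dead_tup DESC
LIMIT 20;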
1. index_bloat_check.sql
WITH btree_index_atts AS (
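-- one row per indexed column of each btree index, with the index's page and tuple counts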
SELECT nspname, relname, reltuples, relpages, indrelid, relam,
regexp_split_to_table(indkey::text, ' ')::smallint AS attnum,
indexrelid as index_oid
FROM pg_index
JOIN pg_class ON pg_class.oid=pg_index.indexrelid
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
JOIN pg_am ON pg_class.relam = pg_am.oid
WHERE pg_am.amname = 'btree'
),
index_item_sizes AS (
SELECT
i.nspname, i.relname, i.reltuples, i.relpages, i.relam,
s.starelid, a.attrelid AS table_oid, index_oid,
current_setting('block_size')::numeric AS bs,
8 AS maxalign,
24 AS pagehdr,
/* per tuple header: add index_attribute_bm if some cols are null-able */
CASE WHEN max(coalesce(s.stanullfrac,0)) = 0
THEN 2
ELSE 6
END AS index_tuple_hdr,
/* data len: subtract the space saved by null values, using the null fraction from stats */
sum( (1-coalesce(s.stanullfrac, 0)) * coalesce(s.stawidth, 2048) ) AS nulldatawidth
FROM pg_attribute AS a
JOIN pg_statistic AS s ON s.starelid=a.attrelid AND s.staattnum = a.attnum
JOIN btree_index_atts AS i ON i.indrelid = a.attrelid AND a.attnum = i.attnum
WHERE a.attnum > 0
GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9
),
index_aligned AS (
SELECT maxalign, bs, nspname, relname AS index_name, reltuples,
relpages, relam, table_oid, index_oid,
( 2 +
maxalign - CASE /* Add padding to the index tuple header to align on MAXALIGN */
WHEN index_tuple_hdr%maxalign = 0 THEN maxalign
ELSE index_tuple_hdr%maxalign
END
+ nulldatawidth + maxalign - CASE /* Add padding to the data to align on MAXALIGN */
WHEN nulldatawidth::integer%maxalign = 0 THEN maxalign
ELSE nulldatawidth::integer%maxalign
END
)::numeric AS nulldatahdrwidth, pagehdr
FROM index_item_sizes AS s1
),
otta_calc AS (
SELECT bs, nspname, table_oid, index_oid, index_name, relpages, coalesce(
ceil((reltuples*(4+nulldatahdrwidth))/(bs-pagehdr::float)) +
CASE WHEN am.amname IN ('hash','btree') THEN 1 ELSE 0 END , 0 -- btree and hash have a metadata reserved block
) AS otta
FROM index_aligned AS s2
LEFT JOIN pg_am am ON s2.relam = am.oid
),
raw_bloat AS (
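-- compare the actual index size with the estimate ('otta') to get wasted bytes and bloat %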
SELECT current_database() as dbname, nspname, c.relname AS table_name, index_name,
bs*(sub.relpages)::bigint AS totalbytes, otta as expected,
CASE
WHEN sub.relpages <= otta THEN 0
ELSE bs*(sub.relpages-otta)::bigint END
AS wastedbytes,
CASE
WHEN sub.relpages <= otta
THEN 0 ELSE bs*(sub.relpages-otta)::bigint * 100 / (bs*(sub.relpages)::bigint) END
AS realbloat,
pg_relation_size(sub.table_oid) as table_bytes,
stat.idx_scan as index_scans
FROM otta_calc AS sub
JOIN pg_class AS c ON c.oid=sub.table_oid
JOIN pg_stat_user_indexes AS stat ON sub.index_oid = stat.indexrelid
),
format_bloat AS (
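-- convert raw byte counts into MB and a rounded bloat percentage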
SELECT dbname as database_name, nspname as schema_name, table_name, index_name,
round(realbloat) as bloat_pct, round(wastedbytes/(1024^2)::NUMERIC) as bloat_mb,
round(totalbytes/(1024^2)::NUMERIC,3) as index_mb,
round(table_bytes/(1024^2)::NUMERIC,3) as table_mb,
index_scans
FROM raw_bloat
)
-- final query outputting the bloated indexes
-- change the where and order by to change
-- what shows up as bloated
SELECT *
FROM format_bloat
WHERE ( bloat_pct > 50 and bloat_mb > 10 )
ORDER BY bloat_mb DESC;
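If an index shows up as badly bloated, rebuilding it is the usual fix. A hedged sketch with placeholder names (some_schema.some_index, some_table, some_column are assumptions, not objects from this post); plain REINDEX blocks writes to the table while it runs, so on a busy system the concurrent build-and-swap pattern below is often preferred:

-- Straightforward rebuild (blocks writes to the table for the duration):
REINDEX INDEX some_schema.some_index;

-- Lower-impact alternative: build a fresh copy concurrently, then swap it in.
-- (Does not apply to indexes backing PRIMARY KEY / UNIQUE constraints.)
CREATE INDEX CONCURRENTLY some_index_new
    ON some_schema.some_table (some_column);
DROP INDEX CONCURRENTLY some_schema.some_index;
ALTER INDEX some_schema.some_index_new
    RENAME TO some_index;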
2. table_bloat_check.sql
-- new table bloat query
-- still needs work; is often off by +/- 20%
WITH constants AS (
-- define some constants for sizes of things
-- for reference down the query and easy maintenance
SELECT current_setting('block_size')::numeric AS bs, 23 AS hdr, 8 AS ma
),
no_stats AS (
-- screen out tables that have attributes
-- which don't have stats, such as JSON
SELECT table_schema, table_name
FROM information_schema.columns
LEFT OUTER JOIN pg_stats
ON table_schema = schemaname
AND table_name = tablename
AND column_name = attname
WHERE attname IS NULL
AND table_schema NOT IN ('pg_catalog', 'information_schema')
GROUP BY table_schema, table_name
),
null_headers AS (
-- calculate null header sizes
-- omitting tables which don't have complete stats
-- and attributes which aren't visible
SELECT
hdr+1+(sum(case when null_frac <> 0 THEN 1 else 0 END)/8) as nullhdr,
SUM((1-null_frac)*avg_width) as datawidth,
MAX(null_frac) as maxfracsum,
schemaname,
tablename,
hdr, ma, bs
FROM pg_stats CROSS JOIN constants
LEFT OUTER JOIN no_stats
ON schemaname = no_stats.table_schema
AND tablename = no_stats.table_name
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
AND no_stats.table_name IS NULL
AND EXISTS ( SELECT 1
FROM information_schema.columns
WHERE schemaname = columns.table_schema
AND tablename = columns.table_name )
GROUP BY schemaname, tablename, hdr, ma, bs
),
data_headers AS (
-- estimate header and row size
SELECT
ma, bs, hdr, schemaname, tablename,
(datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
(maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
FROM null_headers
),
table_estimates AS (
-- make estimates of how large the table should be
-- based on row and page size
SELECT schemaname, tablename, bs,
reltuples, relpages * bs as table_bytes,
CEIL((reltuples*
(datahdr + nullhdr2 + 4 + ma -
(CASE WHEN datahdr%ma=0
THEN ma ELSE datahdr%ma END)
)/(bs-20))) * bs AS expected_bytes
FROM data_headers
JOIN pg_class ON tablename = relname
JOIN pg_namespace ON relnamespace = pg_namespace.oid
AND schemaname = nspname
WHERE pg_class.relkind = 'r'
),
table_estimates_plus AS (
-- add some extra metadata to the table data
-- and calculations to be reused
-- including whether we can't estimate it
-- or whether we think it might be compressed
SELECT current_database() as databasename,
schemaname, tablename, reltuples as est_rows,
CASE WHEN expected_bytes > 0 AND table_bytes > 0 THEN
TRUE ELSE FALSE END as can_estimate,
CASE WHEN expected_bytes > table_bytes THEN
TRUE ELSE FALSE END as is_compressed,
CASE WHEN table_bytes > 0
THEN table_bytes::NUMERIC
ELSE NULL::NUMERIC END
AS table_bytes,
CASE WHEN expected_bytes > 0
THEN expected_bytes::NUMERIC
ELSE NULL::NUMERIC END
AS expected_bytes,
CASE WHEN expected_bytes > 0 AND table_bytes > 0
AND expected_bytes <= table_bytes
THEN (table_bytes - expected_bytes)::NUMERIC
ELSE 0::NUMERIC END AS bloat_bytes
FROM table_estimates
),
bloat_data AS (
-- do final math calculations and formatting
select current_database() as databasename,
schemaname, tablename, can_estimate, is_compressed,
table_bytes, round(table_bytes/(1024^2)::NUMERIC,3) as table_mb,
expected_bytes, round(expected_bytes/(1024^2)::NUMERIC,3) as expected_mb,
round(bloat_bytes*100/table_bytes) as pct_bloat,
round(bloat_bytes/(1024::NUMERIC^2),2) as mb_bloat
FROM table_estimates_plus
)
-- filter output for bloated tables
SELECT databasename, schemaname, tablename,
--can_estimate, is_compressed,
pct_bloat, mb_bloat,
table_mb
FROM bloat_data
-- this where clause defines which tables actually appear
-- in the bloat chart
-- example below filters for tables which are either 50%
-- bloated and more than 10MB in size, or more than 25%
-- bloated and more than 1GB in size
WHERE ( pct_bloat >= 50 AND mb_bloat >= 10 )
OR ( pct_bloat >= 25 AND mb_bloat >= 1000 )
ORDER BY pct_bloat DESC;
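When a table itself is heavily bloated, a plain VACUUM only marks the dead space as reusable; actually shrinking the file requires rewriting the table. A hedged sketch with a placeholder name (some_schema.some_table is an assumption), plus a per-table autovacuum tweak so the bloat does not simply build up again:

-- Rewrite the table to reclaim space (holds an ACCESS EXCLUSIVE lock while it runs):
VACUUM FULL VERBOSE some_schema.some_table;

-- Vacuum this table more aggressively: trigger at ~5% dead rows instead of the default 20%.
ALTER TABLE some_schema.some_table
    SET (autovacuum_vacuum_scale_factor = 0.05,
         autovacuum_vacuum_threshold = 100);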