Useful Postgres queries

-- Use this query to generate a database migration (e.g. RoR's db:migrate or Sqitch)
-- that drops unused and redundant indexes.


-- This query generates a set of `DROP INDEX` statements that
-- can be used in your migration script. It also generates the
-- corresponding `CREATE INDEX` statements; put them into the revert/rollback migration script.

-- All output lines are prepended with "NOTICE:  ", so you need to remove that prefix manually.

-- It is also a good idea to manually double check all indexes being dropped.

-- This query doesn't need any additional extensions to be installed
-- (except plpgsql), and doesn't create any objects (such as views),
-- so feel free to use it in cloud environments (Heroku, AWS RDS, etc.).

-- It also doesn't do anything except read system catalogs and
-- print NOTICEs, so you can safely run it on your
-- production *master* database.
-- (Keep in mind that on replicas, the picture of index usage
-- is usually very different from the master.)

do $$
declare
  r record;
  str text;
  output_drop text[];
  output_create text[];
  tblname text;
begin
  tblname := NULL;
  output_drop := '{}'::text[];
  output_create := '{}'::text[];
  for r in
    with unused as (
      select
          'unused'::text as reason,
          format('idx_scan: %s, ', pg_stat_user_indexes.idx_scan)::text as details,
          pg_stat_user_indexes.schemaname||'.'||pg_stat_user_indexes.relname as tablename,
          indexrelname as indexname,
          pg_stat_user_indexes.idx_scan,
          (coalesce(n_tup_ins,0)+coalesce(n_tup_upd,0)-coalesce(n_tup_hot_upd,0)+coalesce(n_tup_del,0)) as write_activity,
          pg_stat_user_tables.seq_scan,
          pg_stat_user_tables.n_live_tup,
          pg_get_indexdef(pg_index.indexrelid) as indexdef,
          pg_size_pretty(pg_relation_size(pg_index.indexrelid::regclass)) as size
      from pg_stat_user_indexes
      join pg_stat_user_tables
          on pg_stat_user_indexes.relid=pg_stat_user_tables.relid
      join pg_index
          ON pg_index.indexrelid=pg_stat_user_indexes.indexrelid
      where
          pg_stat_user_indexes.idx_scan = 0 /* < 10 or smth */
          and pg_index.indisunique is false
          and pg_stat_user_indexes.idx_scan::float/(coalesce(n_tup_ins,0)+coalesce(n_tup_upd,0)-coalesce(n_tup_hot_upd,0)+coalesce(n_tup_del,0)+1)::float<0.01
          and (coalesce(n_tup_ins,0)+coalesce(n_tup_upd,0)-coalesce(n_tup_hot_upd,0)+coalesce(n_tup_del,0))>10000
    ), index_data as (
      select *, string_to_array(indkey::text,' ') as key_array,array_length(string_to_array(indkey::text,' '),1) as nkeys
      from pg_index
    ), redundant as (
      select
        'redundant'::text as reason,
        format('redundant to this index: %I', i1.indexrelid::regclass)::text as details,
        i2.indrelid::regclass::text as tablename,
        i2.indexrelid::regclass::text as indexname,
        pg_get_indexdef(i1.indexrelid) main_indexdef,
        pg_get_indexdef(i2.indexrelid) indexdef,
        pg_size_pretty(pg_relation_size(i2.indexrelid)) size
      from
        index_data as i1
        join index_data as i2 on i1.indrelid=i2.indrelid and i1.indexrelid <> i2.indexrelid
        left join pg_stat_user_indexes on pg_stat_user_indexes.relid = i2.indexrelid
      where
        (regexp_replace(i1.indpred, 'location \d+', 'location', 'g') IS NOT DISTINCT FROM regexp_replace(i2.indpred, 'location \d+', 'location', 'g'))
        and (regexp_replace(i1.indexprs, 'location \d+', 'location', 'g') IS NOT DISTINCT FROM regexp_replace(i2.indexprs, 'location \d+', 'location', 'g')) 
        and ((i1.nkeys > i2.nkeys and not i2.indisunique) OR (i1.nkeys=i2.nkeys and ((i1.indisunique and i2.indisunique and (i1.indexrelid>i2.indexrelid)) or (not i1.indisunique and not i2.indisunique and (i1.indexrelid>i2.indexrelid)) or (i1.indisunique and not i2.indisunique))))
        and i1.key_array[1:i2.nkeys]=i2.key_array 
    ), together as (
      select reason, details, tablename, indexname, size, indexdef, null as main_indexdef
      from unused
      union all
      select reason, details, tablename, indexname, size, indexdef, main_indexdef
      from redundant
    )
    select 
      tablename, indexname, size, 
      string_agg(reason, ', ') as reasons,
      string_agg(details, '; ') as details, 
      max(indexdef) as indexdef, -- always 1 value
      string_agg(main_indexdef, '; ') as main_indexdef -- several values are possible here
    from together
    group by tablename, indexname, size
    order by 1,2
  loop
    if tblname is distinct from r.tablename then
      if tblname is not null then
        output_drop := array_append(output_drop, '');
        output_create := array_append(output_create, '');
      end if;
      output_drop := array_append(output_drop, format('-- Table: %I', r.tablename));
      output_create := array_append(output_create, format('-- Table: %I', r.tablename));
    end if;
    output_drop := array_append(output_drop, format('DROP INDEX %I; -- %s, %s', r.indexname, r.size, r.reasons));
    output_create := array_append(output_create, format('%s; -- %s, %s', r.indexdef, r.size, r.reasons));
    tblname := r.tablename;
  end loop;

  raise notice '-- ***** Drop unused/redundant indexes *****';
  foreach str in array output_drop loop
    raise notice '%', str;
  end loop;

  raise notice '';
  raise notice '';
  raise notice '-- ***** Recreate indexes (rollback migration) *****';
  foreach str in array output_create loop
    raise notice '%', str;
  end loop;
end; 
$$ language plpgsql;
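-- Optional follow-up (not part of the generator above): if you want to drop an
-- index without holding a heavy lock on a busy system, the generated statements
-- can be adapted to DROP INDEX CONCURRENTLY, which must run outside a
-- transaction block. A minimal sketch with a hypothetical index name:

DROP INDEX CONCURRENTLY IF EXISTS some_unused_index; -- hypothetical name; substitute one from the generated list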
--https://github.com/dataegret/pg-utils/tree/master/sql

SELECT
        pg_stat_user_indexes.schemaname||'.'||pg_stat_user_indexes.relname as tablename,
        indexrelname,
        pg_stat_user_indexes.idx_scan,
        (coalesce(n_tup_ins,0)+coalesce(n_tup_upd,0)-coalesce(n_tup_hot_upd,0)+coalesce(n_tup_del,0)) as write_activity,
        pg_stat_user_tables.seq_scan,
        pg_stat_user_tables.n_live_tup,
        pg_size_pretty(pg_relation_size(pg_index.indexrelid::regclass)) as size
from pg_stat_user_indexes
join pg_stat_user_tables
        on pg_stat_user_indexes.relid=pg_stat_user_tables.relid
join pg_index
        ON pg_index.indexrelid=pg_stat_user_indexes.indexrelid
where
        pg_index.indisunique is false
        and pg_stat_user_indexes.idx_scan::float/(coalesce(n_tup_ins,0)+coalesce(n_tup_upd,0)-coalesce(n_tup_hot_upd,0)+coalesce(n_tup_del,0)+1)::float<0.01
        and (coalesce(n_tup_ins,0)+coalesce(n_tup_upd,0)-coalesce(n_tup_hot_upd,0)+coalesce(n_tup_del,0))>10000
order by 4 desc,1,2;
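-- The idx_scan counters above accumulate since the last statistics reset
-- (pg_stat_reset() or crash recovery), so a low count over a short collection
-- window can be misleading. A hedged sanity check of the reset time, plus the
-- reset call itself (commented out, since it wipes all counters for the
-- current database):

SELECT stats_reset FROM pg_stat_database WHERE datname = current_database();
-- SELECT pg_stat_reset();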
--https://github.com/dataegret/pg-utils/tree/master/sql
--pgstattuple extension required
--WARNING: without an index name/mask, the query will read all available indexes, which could cause I/O spikes
with indexes as (
    select * from pg_stat_user_indexes
)
select schemaname,
table_name,
pg_size_pretty(table_size) as table_size,
index_name,
pg_size_pretty(index_size) as index_size,
idx_scan as index_scans,
round((free_space*100/index_size)::numeric, 1) as waste_percent,
pg_size_pretty(free_space) as waste
from (
    select schemaname, p.relname as table_name, indexrelname as index_name,
    (select (case when avg_leaf_density = 'NaN' then 0 
        else greatest(ceil(index_size * (1 - avg_leaf_density / (coalesce((SELECT (regexp_matches(reloptions::text, E'.*fillfactor=(\\d+).*'))[1]),'90')::real)))::bigint, 0) end)
        from pgstatindex(schemaname || '.' || p.indexrelid::regclass::text)
    ) as free_space,
    pg_relation_size(p.indexrelid) as index_size,
    pg_relation_size(p.relid) as table_size,
    idx_scan
    from indexes p
    join pg_class c on p.indexrelid = c.oid
    where pg_get_indexdef(p.indexrelid) like '%USING btree%' and
    --put your index name/mask here
    indexrelname ~ ''
) t
order by free_space desc;
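-- Both this query and the next one need the pgstattuple extension (see the
-- warnings above). Assuming the contrib package is installed on the server and
-- you have sufficient privileges, a minimal sketch to enable it:

CREATE EXTENSION IF NOT EXISTS pgstattuple;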
--https://github.com/dataegret/pg-utils/tree/master/sql
--pgstattuple extension required
--WARNING: without a table name/mask, the query will read all available tables, which could cause I/O spikes
select nspname,
relname,
pg_size_pretty(relation_size + toast_relation_size) as total_size,
pg_size_pretty(toast_relation_size) as toast_size,
round(((relation_size - (relation_size - free_space)*100/fillfactor)*100/greatest(relation_size, 1))::numeric, 1) table_waste_percent,
pg_size_pretty((relation_size - (relation_size - free_space)*100/fillfactor)::bigint) table_waste,
round(((toast_free_space + relation_size - (relation_size - free_space)*100/fillfactor)*100/greatest(relation_size + toast_relation_size, 1))::numeric, 1) total_waste_percent,
pg_size_pretty((toast_free_space + relation_size - (relation_size - free_space)*100/fillfactor)::bigint) total_waste
from (
    select nspname, relname,
    (select free_space from pgstattuple(c.oid)) as free_space,
    pg_relation_size(c.oid) as relation_size,
    (case when reltoastrelid = 0 then 0 else (select free_space from pgstattuple(c.reltoastrelid)) end) as toast_free_space,
    coalesce(pg_relation_size(c.reltoastrelid), 0) as toast_relation_size,
    coalesce((SELECT (regexp_matches(reloptions::text, E'.*fillfactor=(\\d+).*'))[1]),'100')::real AS fillfactor
    from pg_class c
    left join pg_namespace n on (n.oid = c.relnamespace)
    where nspname not in ('pg_catalog', 'information_schema')
    and nspname !~ '^pg_toast' and relkind = 'r'
    --put your table name/mask here
    and relname ~ ''
) t
order by (toast_free_space + relation_size - (relation_size - free_space)*100/fillfactor) desc
limit 20;
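-- Before acting on the estimates above, it can be worth inspecting one
-- suspicious table directly; note that pgstattuple scans the whole relation,
-- so expect I/O. A sketch with a hypothetical table name:

SELECT * FROM pgstattuple('public.some_big_table'); -- hypothetical name; returns dead_tuple_percent, free_space, etc.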
-- from https://wiki.postgresql.org/wiki/Show_database_bloat
SELECT
  current_database(), schemaname, tablename, /*reltuples::bigint, relpages::bigint, otta,*/
  ROUND((CASE WHEN otta=0 THEN 0.0 ELSE sml.relpages::FLOAT/otta END)::NUMERIC,1) AS tbloat,
  CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::BIGINT END AS wastedbytes,
  iname, /*ituples::bigint, ipages::bigint, iotta,*/
  ROUND((CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages::FLOAT/iotta END)::NUMERIC,1) AS ibloat,
  CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes
FROM (
  SELECT
    schemaname, tablename, cc.reltuples, cc.relpages, bs,
    CEIL((cc.reltuples*((datahdr+ma-
      (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::FLOAT)) AS otta,
    COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
    COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::FLOAT)),0) AS iotta -- very rough approximation, assumes all cols
  FROM (
    SELECT
      ma,bs,schemaname,tablename,
      (datawidth+(hdr+ma-(CASE WHEN hdr%ma=0 THEN ma ELSE hdr%ma END)))::NUMERIC AS datahdr,
      (maxfracsum*(nullhdr+ma-(CASE WHEN nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
    FROM (
      SELECT
        schemaname, tablename, hdr, ma, bs,
        SUM((1-null_frac)*avg_width) AS datawidth,
        MAX(null_frac) AS maxfracsum,
        hdr+(
          SELECT 1+COUNT(*)/8
          FROM pg_stats s2
          WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename
        ) AS nullhdr
      FROM pg_stats s, (
        SELECT
          (SELECT current_setting('block_size')::NUMERIC) AS bs,
          CASE WHEN SUBSTRING(v,12,3) IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr,
          CASE WHEN v ~ 'mingw32' THEN 8 ELSE 4 END AS ma
        FROM (SELECT version() AS v) AS foo
      ) AS constants
      GROUP BY 1,2,3,4,5
    ) AS foo
  ) AS rs
  JOIN pg_class cc ON cc.relname = rs.tablename
  JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = rs.schemaname AND nn.nspname <> 'information_schema'
  LEFT JOIN pg_index i ON indrelid = cc.oid
  LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
) AS sml
ORDER BY wastedbytes DESC;
--https://github.com/dataegret/pg-utils/tree/master/sql
-- Show shared_buffers and OS page cache stats for the current database
-- Requires pg_buffercache and pgfincore
WITH qq AS (SELECT
    c.oid,
    count(b.bufferid) * 8192 AS size,
    (select sum(pages_mem) * 4096 from pgfincore(c.oid::regclass)) as size_in_pagecache
FROM pg_buffercache b
INNER JOIN pg_class c ON b.relfilenode = pg_relation_filenode(c.oid)
AND b.reldatabase IN (0, (SELECT oid FROM pg_database WHERE datname = current_database()))
GROUP BY 1)
SELECT
    pg_size_pretty(sum(qq.size)) AS shared_buffers_size,
    pg_size_pretty(sum(qq.size_in_pagecache)) AS size_in_pagecache,
    pg_size_pretty(pg_database_size(current_database())) as database_size
FROM qq;
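-- The query above needs both extensions created in the database.
-- pg_buffercache ships with contrib; pgfincore must be installed on the server
-- separately. Assuming both packages are present, a minimal sketch:

CREATE EXTENSION IF NOT EXISTS pg_buffercache;
CREATE EXTENSION IF NOT EXISTS pgfincore;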
-- Buffer cache hit ratio for user tables (shared_buffers hits vs. blocks read from disk)
select
    sum(heap_blks_hit)::float / nullif(sum(heap_blks_hit) + sum(heap_blks_read), 0) as hit_ratio,
    sum(heap_blks_hit) as quantity_hit,
    sum(heap_blks_read) as quantity_read
from pg_statio_user_tables;
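-- A similar ratio can be computed for index blocks; a hedged companion sketch
-- using pg_statio_user_indexes (nullif avoids division by zero on an idle
-- database):

select
    sum(idx_blks_hit)::float / nullif(sum(idx_blks_hit) + sum(idx_blks_read), 0) as index_hit_ratio,
    sum(idx_blks_hit) as quantity_hit,
    sum(idx_blks_read) as quantity_read
from pg_statio_user_indexes;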
