Commit 54823e4b authored by HTH's avatar HTH

整理目录

parent 62a937d5
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="SqlDialectMappings">
<file url="file://$PROJECT_DIR$/fox/预发布.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/mutations.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/thinker-cloud/DDL.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/thinker-cloud/gateway_mqtt_log查询.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/thinker-cloud/三赢/清除三赢数据.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/thinker-cloud/性能排查.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/thinker-cloud/测试sql.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/慢sql.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/按分区查询及删除数据.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/按分区查询及删除系统表数据.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/查询计划.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/系统表TTL.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/统计磁盘.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/初始化库/系统表TTL.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/工具SQL/mutations.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/工具SQL/按分区查询及删除数据.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/工具SQL/按分区查询及删除系统表数据.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/工具SQL/排查慢sql.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/工具SQL/查询计划.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/工具SQL/统计磁盘.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/测试相关/24.3.6版本新功能测试.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/../fox/ddl.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/../fox/pgToClickhouse.sql" dialect="ClickHouse" />
<file url="file://$PROJECT_DIR$/../fox/test_CollapsingMergeTree.sql" dialect="ClickHouse" />
......
-- Tighten retention of ClickHouse system logs to 1 day.
-- TTL is enforced lazily, on background merges — old rows may linger briefly.
ALTER TABLE system.query_thread_log MODIFY TTL event_date + toIntervalDay(1);
ALTER TABLE system.query_log MODIFY TTL event_date + toIntervalDay(1);
ALTER TABLE system.trace_log MODIFY TTL event_date + toIntervalDay(1);
ALTER TABLE system.asynchronous_metric_log MODIFY TTL event_date + toIntervalDay(1);
ALTER TABLE system.session_log MODIFY TTL event_date + toIntervalDay(1);
ALTER TABLE system.part_log MODIFY TTL event_date + toIntervalDay(1);
ALTER TABLE system.metric_log MODIFY TTL event_date + toIntervalDay(1);
-- DESTRUCTIVE: intentionally empties the whole query log immediately.
truncate table system.query_log;
\ No newline at end of file
-- Most recent reading for the given meter attributes within a one-hour window.
-- FIX: the statement had no terminating semicolon, so the following WITH
-- statement would be parsed as a continuation of this one.
-- NOTE(review): the closed upper bound '...16:59:59' misses sub-second rows
-- after that instant; a half-open range (timestamp < '2022-10-14 17:00:00')
-- would be safer — confirm timestamp precision before changing.
select base.*
from meter_data as base
where timestamp >= '2022-10-14 16:00:00'
  and timestamp <= '2022-10-14 16:59:59'
  and base.meter_attr_id in (32546, 31859)
order by timestamp desc
limit 1;
-- Earliest row per meter attribute within the window (smallest id per group).
-- FIX: added the missing terminating semicolon so following statements parse
-- independently.
with temp as (
    select base.*
    from meter_data as base
    where timestamp >= '2022-10-14 16:00:00'
      and timestamp <= '2022-10-14 17:59:59'
      and base.meter_attr_id in (32546, 31859)
)
select temp.*
from temp
where id in (
    select min(temp.id)
    from temp
    group by meter_attr_id
);
-- Total electricity consumption across all stations for the hour.
SELECT sum(toDecimal256(value, 4))
FROM meter_data
WHERE timestamp >= '2022-10-16 20:00:00'
  AND timestamp <= '2022-10-16 20:59:59'
  AND meter_attr_id IN (38548, 36892, 36894, 36899, 38550, 38552);
-- Total gas consumption across all stations for the hour.
SELECT sum(toDecimal256(value, 4))
FROM meter_data
WHERE timestamp >= '2022-10-16 19:00:00'
  AND timestamp <= '2022-10-16 19:59:59'
  AND meter_attr_id IN (38547, 36893, 36895, 36898, 38549, 38551);
......@@ -29,6 +29,9 @@ create table IF NOT EXISTS meter_data
PARTITION BY toDate(timestamp)
ORDER BY (timestamp, meter_attr_id);
-- Add a data-skipping (secondary) index on meter_attr_id so selective lookups
-- can skip granules; bloom_filter with GRANULARITY 1 gives per-granule
-- selectivity at the cost of a larger index.
ALTER TABLE meter_data ADD INDEX meter_data_attr_id_vix meter_attr_id TYPE bloom_filter GRANULARITY 1;
-- 创建 meter_analysis 表
create table IF NOT EXISTS meter_analysis
(
......@@ -42,25 +45,6 @@ create table IF NOT EXISTS meter_analysis
PARTITION BY toDate(timestamp)
ORDER BY (timestamp, meter_attr_id);
-- Create meter_event_log table — the definition below is fully commented out
-- (dead code); a live CREATE for this table appears later in the file.
-- Consider deleting this commented-out block instead of keeping it.
-- create table IF NOT EXISTS meter_event_log
-- (
-- id Int64,
-- event_id Int64,
-- event_value String,
-- description String,
-- event_name String,
-- create_time DateTime64(6),
-- gateway_id Int64,
-- polling_id String,
-- raw_data_id Int64,
-- original_value String,
-- custom_is UInt8,
-- last_value String,
-- change_is UInt8
-- ) ENGINE = MergeTree
-- PARTITION BY toDate(create_time)
-- ORDER BY create_time;
-- 创建 reporting_raw_data 表
create table IF NOT EXISTS reporting_raw_data
......@@ -156,8 +140,6 @@ ORDER BY (timestamp,
SETTINGS index_granularity = 8192;
-- 创建 meter_event_log 表
create table IF NOT EXISTS meter_event_log
(
......
-- Latest row per meter attribute within the window (largest id per group).
-- FIX: added the missing terminating semicolon so the following statement
-- parses independently.
with temp as (
    select base.*
    from meter_data as base
    where timestamp >= '2021-11-04 11:00:00'
      and timestamp <= '2021-11-04 12:00:00'
      and meter_attr_id in (78038, 28775)
    order by timestamp desc
)
select temp.*
from temp
where id in (
    select max(temp.id)
    from temp
    group by meter_attr_id
);
-- Total row count of the MQTT gateway log.
-- FIX: added the missing terminating semicolon; count(*) is the idiomatic
-- row-count form (count(1) is equivalent but less conventional).
select count(*)
from gateway_mqtt_log;
\ No newline at end of file
......@@ -5,4 +5,7 @@ ALTER TABLE system.trace_log MODIFY TTL event_date + toIntervalDay(10);
-- Retain most system logs for 10 days; metric_log is kept longer (30 days).
ALTER TABLE system.asynchronous_metric_log MODIFY TTL event_date + toIntervalDay(10);
ALTER TABLE system.session_log MODIFY TTL event_date + toIntervalDay(10);
ALTER TABLE system.part_log MODIFY TTL event_date + toIntervalDay(10);
ALTER TABLE system.metric_log MODIFY TTL event_date + toIntervalDay(30);
\ No newline at end of file
ALTER TABLE system.metric_log MODIFY TTL event_date + toIntervalDay(30);
-- truncate table system.query_log;
\ No newline at end of file
-- Read meter_data with parts merged at query time (FINAL).
-- NOTE(review): SELECT * with FINAL reads every column and forces an
-- on-the-fly merge — expensive on large tables; confirm this is debug-only.
select *
from meter_data FINAL;
-- Finalize the hourly pre-aggregates stored as AggregateFunction states.
SELECT
    meter_attr_id,
    timestamp,
    argMinMerge(first) AS first,
    argMaxMerge(last) AS last,
    maxMerge(max) AS max,
    minMerge(min) AS min,
    avgMerge(avg) AS avg,
    sumMerge(sum) AS sum,
    countMerge(count) AS count
FROM meter_attr_hour_statistics
GROUP BY meter_attr_id, timestamp
ORDER BY timestamp DESC;
-- Daily rollup computed from the finalized hourly aggregates.
-- BUG FIX: the outer select used `sum/count as avg`, referencing the inner
-- hourly columns without an aggregate (invalid under GROUP BY); the daily
-- average must be total sum divided by total count: sum(sum) / sum(count).
-- Also dropped the inner ORDER BY — ordering a subquery feeding a GROUP BY
-- has no effect on the result.
select meter_attr_id, toStartOfDay(base.timestamp) as time
     , argMin(first, base.timestamp) as first
     , argMax(last, base.timestamp) as last
     , max(max) as max
     , min(min) as min
     , sum(sum) / sum(count) as avg
     , sum(sum) as sum
     , sum(count) as count
from (
    -- finalize the hourly AggregateFunction states
    select meter_attr_id, timestamp
         , argMinMerge(first) as first
         , argMaxMerge(last) as last
         , maxMerge(max) as max
         , minMerge(min) as min
         , avgMerge(avg) as avg
         , sumMerge(sum) as sum
         , countMerge(count) as count
    from meter_attr_hour_statistics
    group by meter_attr_id, timestamp
) as base
group by meter_attr_id, time;
-- Insert sample rows into meter_data.
-- NOTE(review): these rely on the table's physical column order; an explicit
-- column list (insert into meter_data (col1, col2, ...) values ...) would
-- survive schema changes — confirm column order against the DDL.
insert into meter_data values(1,'1.1',1,now(),1);
insert into meter_data values(9,'101',1,'2024-08-07 15:40:30.000000',1);
-- Materialized-view target table: rebuild from scratch.
-- DESTRUCTIVE: drops the view and the table (and all accumulated statistics).
drop VIEW if exists meter_attr_hour_statistics_mv;
drop table if exists meter_attr_hour_statistics;
-- Hourly per-attribute statistics stored as partial aggregation states;
-- readers must apply the corresponding -Merge combinators to finalize.
create table meter_attr_hour_statistics
(
    "meter_attr_id" Int64,
    "timestamp" DateTime64(0),
    "first" AggregateFunction(argMin, String,DateTime64(6)),
    "last" AggregateFunction(argMax, String,DateTime64(6)),
    "min" AggregateFunction(min, Decimal64(6)),
    "max" AggregateFunction(max, Decimal64(6)),
    "sum" AggregateFunction(sum, Decimal64(6)),
    "count" AggregateFunction(count, UInt32),
    "avg" AggregateFunction(avg, Decimal64(6))
)
engine = AggregatingMergeTree
PARTITION BY toDate(timestamp)
ORDER BY (timestamp, meter_attr_id);
-- Incremental materialized view: on every insert into meter_data, compute
-- hourly aggregate states per meter_attr_id and append them to the target
-- table (AggregatingMergeTree folds states on merges).
CREATE MATERIALIZED VIEW meter_attr_hour_statistics_mv
-- REFRESH EVERY 1 MINUTE
-- NOTE(review): keyword casing is inconsistent (To/As vs CREATE) — cosmetic.
To meter_attr_hour_statistics As
select meter_attr_id
     , toStartOfHour(base.timestamp) AS timestamp
     , argMinState(base.value, base.timestamp) as first
     , argMaxState(base.value, base.timestamp) as last
     , minState(toDecimal64(base.value, 6)) as min
     , maxState(toDecimal64(base.value, 6)) as max
     , sumState(toDecimal64(base.value, 6)) as sum
     , countState(1) as count
     , avgState(toDecimal64(base.value, 6)) as avg
from meter_data as base
GROUP BY meter_attr_id, timestamp
-- NOTE(review): ORDER BY inside an MV select does not affect the stored
-- data's order (the target table's ORDER BY key governs) — likely removable.
ORDER BY timestamp;
-- Per-table storage statistics: on-disk size, compression ratio, row count,
-- data span in days, and average size per day, from system.parts.
-- FIX: guarded two divisions with nullIf — tables whose parts all fall on a
-- single day had (max_date - min_date) = 0, and empty/odd parts can have
-- data_uncompressed_bytes = 0; either produced inf/nan that was then fed to
-- formatReadableSize. With nullIf the result is NULL, which propagates
-- cleanly through the outer select.
select
    database,
    table,
    formatReadableSize(size) as size,
    formatReadableSize(bytes_on_disk) as bytes_on_disk,
    formatReadableSize(data_uncompressed_bytes) as data_uncompressed_bytes,
    formatReadableSize(data_compressed_bytes) as data_compressed_bytes,
    compress_rate,
    rows,
    days,
    formatReadableSize(avgDaySize) as avgDaySize
from
(
    select
        database,
        table,
        sum(bytes) as size,
        sum(rows) as rows,
        min(min_date) as min_date,
        max(max_date) as max_date,
        sum(bytes_on_disk) as bytes_on_disk,
        sum(data_uncompressed_bytes) as data_uncompressed_bytes,
        sum(data_compressed_bytes) as data_compressed_bytes,
        -- compression ratio in percent; NULL when nothing is stored
        (data_compressed_bytes / nullIf(data_uncompressed_bytes, 0)) * 100 as compress_rate,
        max_date - min_date as days,
        -- NULL instead of inf when all data falls on one day
        size / nullIf(max_date - min_date, 0) as avgDaySize
    from system.parts
    where active
      -- and database = 'system'
      and database = 'default'
      -- and table = 'meter_data'
    group by
        database,
        table
    order by size desc
);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.