-- Temporary SQL to initialise log tables for local development.
-- Will be removed once we have migrations set up.
CREATE TABLE IF NOT EXISTS logs16
(
    `uuid` String,
    `team_id` Int32,
    `trace_id` String,
    `span_id` String,
    `trace_flags` Int32,
    `timestamp` DateTime64(6),
    `observed_timestamp` DateTime64(6),
    `created_at` DateTime64(6),
    `body` String,
    `severity_text` String,
    `severity_number` Int32,
    `service_name` String,
    `resource_attributes` Map(String, String),
    `resource_id` String,
    `instrumentation_scope` String,
    `event_name` String,
    `attributes` Map(String, String),
    `attributes_map_str` Map(String, String),
    `attributes_map_float` Map(String, Float64),
    `attributes_map_datetime` Map(String, DateTime64(6)),
    `attribute_keys` Array(String),
    `attribute_values` Array(String),
    `level` String ALIAS severity_text,
    INDEX idx_severity_text_set severity_text TYPE set(10) GRANULARITY 1,
    INDEX idx_attributes_str_keys mapKeys(attributes_map_str) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_attributes_str_values mapValues(attributes_map_str) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_body_ngram body TYPE ngrambf_v1(3, 20000, 4, 0) GRANULARITY 1
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/logs16', '{replica}')
PARTITION BY toDate(timestamp)
ORDER BY (team_id, toStartOfMinute(timestamp) DESC, service_name, severity_text, toUnixTimestamp(timestamp) DESC, trace_id, span_id)
SETTINGS
    allow_remote_fs_zero_copy_replication = 1,
    allow_experimental_reverse_key = 1;
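-- Illustrative only: the shape of query the reverse (DESC) sort key and skip
-- indexes above are tuned for -- recent logs for one team and service, newest
-- first. The team_id and service_name values are placeholders.
--
-- SELECT timestamp, severity_text, body
-- FROM logs16
-- WHERE team_id = 1
--   AND service_name = 'web'
--   AND timestamp > now() - INTERVAL 1 HOUR
-- ORDER BY timestamp DESC
-- LIMIT 100;
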
CREATE OR REPLACE TABLE logs AS logs16
ENGINE = Distributed('posthog', 'default', 'logs16');
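-- `logs` fans reads and writes out to the `logs16` shards on the `posthog`
-- cluster, so queries should normally go through it rather than hitting a
-- shard directly. Illustrative only, placeholder team_id:
--
-- SELECT count() FROM logs WHERE team_id = 1;
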
CREATE TABLE IF NOT EXISTS log_attributes
(
    `team_id` Int32,
    `time_bucket` DateTime64(0),
    `service_name` LowCardinality(String),
    `attribute_key` LowCardinality(String),
    `attribute_value` String,
    `attribute_count` SimpleAggregateFunction(sum, UInt64),
    INDEX idx_attribute_key attribute_key TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_attribute_value attribute_value TYPE bloom_filter(0.001) GRANULARITY 1,
    INDEX idx_attribute_key_n3 attribute_key TYPE ngrambf_v1(3, 32768, 3, 0) GRANULARITY 1,
    INDEX idx_attribute_value_n3 attribute_value TYPE ngrambf_v1(3, 32768, 3, 0) GRANULARITY 1
)
ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{shard}/log_attributes', '{replica}')
PARTITION BY toDate(time_bucket)
ORDER BY (team_id, service_name, time_bucket, attribute_key, attribute_value);
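-- Illustrative only: log_attributes looks intended for attribute key/value
-- lookups (e.g. filter autocomplete). Because attribute_count is a
-- SimpleAggregateFunction, rows in unmerged parts must still be summed at
-- read time. Placeholder filter values:
--
-- SELECT attribute_value, sum(attribute_count) AS seen
-- FROM log_attributes
-- WHERE team_id = 1
--   AND service_name = 'web'
--   AND attribute_key = 'http.status_code'
--   AND time_bucket > now() - INTERVAL 1 DAY
-- GROUP BY attribute_value
-- ORDER BY seen DESC
-- LIMIT 20;
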
SET enable_dynamic_type = 1;

CREATE MATERIALIZED VIEW IF NOT EXISTS log_to_log_attributes TO log_attributes
(
    `team_id` Int32,
    `time_bucket` DateTime64(0),
    `service_name` LowCardinality(String),
    `attribute_key` LowCardinality(String),
    `attribute_value` String,
    `attribute_count` SimpleAggregateFunction(sum, UInt64)
)
AS SELECT
    team_id,
    time_bucket,
    service_name,
    attribute_key,
    attribute_value,
    attribute_count
FROM
(
    SELECT
        team_id AS team_id,
        toStartOfInterval(timestamp, toIntervalMinute(10)) AS time_bucket,
        service_name AS service_name,
        arrayJoin(arrayMap((k, v) -> (k, if(length(v) > 256, '', v)), arrayFilter((k, v) -> (length(k) < 256), CAST(attributes, 'Array(Tuple(String, String))')))) AS attribute,
        attribute.1 AS attribute_key,
        CAST(JSONExtract(attribute.2, 'Dynamic'), 'String') AS attribute_value,
        sumSimpleState(1) AS attribute_count
    FROM logs16
    GROUP BY
        team_id,
        time_bucket,
        service_name,
        attribute
);
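-- The view buckets each insert into 10-minute windows, drops attribute keys
-- of 256+ chars and blanks values longer than 256 chars, and emits one
-- partial count per (team, bucket, service, key, value).
-- ReplicatedAggregatingMergeTree then sums attribute_count across parts at
-- merge time, which is why reads must also aggregate (see the example query
-- above).
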
CREATE OR REPLACE TABLE kafka_logs_avro
(
    `uuid` String,
    `team_id` Int32,
    `trace_id` String,
    `span_id` String,
    `trace_flags` Int32,
    `timestamp` DateTime64(6),
    `observed_timestamp` DateTime64(6),
    `created_at` DateTime64(6),
    `body` String,
    `severity_text` String,
    `severity_number` Int32,
    `service_name` String,
    `resource_attributes` Map(String, String),
    `resource_id` String,
    `instrumentation_scope` String,
    `event_name` String,
    `attributes` Map(String, Nullable(String)),
    `attributes_map_str` Map(String, Nullable(String)),
    `attributes_map_float` Map(String, Nullable(Float64)),
    `attributes_map_datetime` Map(String, Nullable(DateTime64(6))),
    `attribute_keys` Array(Nullable(String)),
    `attribute_values` Array(Nullable(String))
)
ENGINE = Kafka('kafka:9092', 'logs_avro', 'clickhouse-logs-avro', 'Avro')
SETTINGS
    kafka_skip_broken_messages = 100,
    kafka_security_protocol = 'PLAINTEXT',
    kafka_thread_per_consumer = 1,
    kafka_num_consumers = 1,
    kafka_poll_timeout_ms = 15000,
    kafka_poll_max_batch_size = 100,
    kafka_max_block_size = 1000;
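-- Note: selecting directly from a Kafka engine table consumes messages from
-- the 'clickhouse-logs-avro' consumer group, so debug reads should go through
-- the materialized view's target table instead. (Recent ClickHouse versions
-- additionally gate direct reads behind stream_like_engine_allow_direct_select.)
-- Illustrative consumer check, assuming ClickHouse 23.8+:
--
-- SELECT * FROM system.kafka_consumers WHERE table = 'kafka_logs_avro';
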
DROP TABLE IF EXISTS kafka_logs_avro_mv;
CREATE MATERIALIZED VIEW kafka_logs_avro_mv TO logs16
(
    `uuid` String,
    `team_id` Int32,
    `trace_id` String,
    `span_id` String,
    `trace_flags` Int32,
    `timestamp` DateTime64(6),
    `observed_timestamp` DateTime64(6),
    `created_at` DateTime64(6),
    `body` String,
    `severity_text` String,
    `severity_number` Int32,
    `service_name` String,
    `resource_attributes` Map(String, String),
    `resource_id` String,
    `instrumentation_scope` String,
    `event_name` String,
    `attributes` Map(String, Nullable(String)),
    `attributes_map_str` Map(String, Nullable(String)),
    `attributes_map_float` Map(String, Nullable(Float64)),
    `attributes_map_datetime` Map(String, Nullable(DateTime64(6))),
    `attribute_keys` Array(Nullable(String)),
    `attribute_values` Array(Nullable(String))
)
AS SELECT *
FROM kafka_logs_avro
SETTINGS
    materialize_skip_indexes_on_insert = 1,
    distributed_background_insert_sleep_time_ms = 5000,
    distributed_background_insert_batch = true;
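-- kafka_logs_avro_mv is what actually drives consumption: each polled batch
-- is written straight into logs16, with skip indexes materialised at insert
-- time. The distributed_background_insert_* settings only apply to inserts
-- into Distributed tables, so they appear to be no-ops here unless the view
-- is repointed at `logs`.
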
SELECT 'clickhouse logs tables initialised successfully!';