<!--
  NOTE: User and query level settings are set up in "users.xml" file.
  If you have accidentally specified user-level settings here, server won't start.
  You can either move the settings to the right place inside "users.xml" file
  or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
-->
<clickhouse>
    <logger>
        <!-- Possible levels [1]:

             - none (turns off logging)
             - fatal
             - critical
             - error
             - warning
             - notice
             - information
             - debug
             - trace
             - test (not for production usage)

             [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
        -->
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <!-- Rotation policy
             See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
        -->
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->

        <!-- Per level overrides (legacy):

             For example, to suppress logging of the ConfigReloader you can use:
             NOTE: levels.logger is reserved, see below.
        -->
        <!--
        <levels>
            <ConfigReloader>none</ConfigReloader>
        </levels>
        -->

        <!-- Per level overrides:

             For example, to suppress logging of the RBAC for the default user you can use:
             (But please note that the logger name may change from version to version, even after a minor upgrade)
        -->
        <!--
        <levels>
            <logger>
                <name>ContextAccess (default)</name>
                <level>none</level>
            </logger>
            <logger>
                <name>DatabaseOrdinary (test)</name>
                <level>none</level>
            </logger>
        </levels>
        -->

        <!-- Structured log formatting:
             You can specify a log format (for now, JSON only). In that case, the console log will be printed
             in the specified format (e.g. JSON).
             For example, as below:
             {"date_time":"1650918987.180175","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
             To enable JSON logging support, please uncomment the entire <formatting> tag below.

             a) You can modify key names by changing the values inside the <names> tag.
                For example, to change DATE_TIME to MY_DATE_TIME, you can do:
                <date_time>MY_DATE_TIME</date_time>
             b) You can prevent unwanted log properties from appearing in logs. To do so, simply comment out (recommended)
                that property in this file.
                For example, if you do not want your log to print query_id, you can comment out only the <query_id> tag.
                However, if you comment out all the tags under <names>, the program will print the default values shown
                below.
        -->
        <!-- <formatting>
            <type>json</type>
            <names>
                <date_time>date_time</date_time>
                <thread_name>thread_name</thread_name>
                <thread_id>thread_id</thread_id>
                <level>level</level>
                <query_id>query_id</query_id>
                <logger_name>logger_name</logger_name>
                <message>message</message>
                <source_file>source_file</source_file>
                <source_line>source_line</source_line>
            </names>
        </formatting> -->
    </logger>

    <url_scheme_mappers>
        <s3>
            <to>https://{bucket}.s3.amazonaws.com</to>
        </s3>
        <gs>
            <to>https://storage.googleapis.com/{bucket}</to>
        </gs>
        <oss>
            <to>https://{bucket}.oss.aliyuncs.com</to>
        </oss>
    </url_scheme_mappers>

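    <!-- Illustration (a hedged sketch, not part of the stock config; bucket and file names are made up):
         with the mappers above, a bucket-style URL is expanded via the {bucket} placeholder, so a query like
             SELECT count() FROM s3('s3://mybucket/data.csv', 'CSVWithNames');
         would be resolved to roughly https://mybucket.s3.amazonaws.com/data.csv.
         Verify the exact expansion against your ClickHouse version. -->
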
    <!-- Add headers to the response in OPTIONS requests. The OPTIONS method is used in CORS preflight requests. -->
    <http_options_response>
        <header>
            <name>Access-Control-Allow-Origin</name>
            <value>*</value>
        </header>
        <header>
            <name>Access-Control-Allow-Headers</name>
            <value>origin, x-requested-with, x-clickhouse-format, x-clickhouse-user, x-clickhouse-key, Authorization</value>
        </header>
        <header>
            <name>Access-Control-Allow-Methods</name>
            <value>POST, GET, OPTIONS</value>
        </header>
        <header>
            <name>Access-Control-Max-Age</name>
            <value>86400</value>
        </header>
    </http_options_response>

    <!-- It is the name that will be shown in the clickhouse-client.
         By default, anything containing "production" will be highlighted in red in the query prompt.
    -->
    <!--display_name>production</display_name-->

    <!-- Port for HTTP API. See also 'https_port' for secure connections.
         This interface is also used by ODBC and JDBC drivers (DataGrip, DBeaver, ...)
         and by most web interfaces (embedded UI, Grafana, Redash, ...).
    -->
    <http_port>8123</http_port>

    <!-- Port for interaction by native protocol with:
         - clickhouse-client and other native ClickHouse tools (clickhouse-benchmark);
         - clickhouse-server with other clickhouse-servers for distributed query processing;
         - ClickHouse drivers and applications supporting the native protocol
           (this protocol is also informally called "the TCP protocol");
         See also 'tcp_port_secure' for secure connections.
    -->
    <tcp_port>9000</tcp_port>

    <!-- Compatibility with MySQL protocol.
         ClickHouse will pretend to be MySQL for applications connecting to this port.
    -->
    <mysql_port>9004</mysql_port>

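    <!-- A usage sketch (host and user are placeholders): with the MySQL compatibility port enabled above,
         a stock MySQL client can connect with something like
             mysql --protocol tcp -h 127.0.0.1 -P 9004 -u default -p
         Not every MySQL feature is supported; see the ClickHouse docs on the MySQL interface for limitations. -->
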
    <!-- Compatibility with PostgreSQL protocol.
         ClickHouse will pretend to be PostgreSQL for applications connecting to this port.
    -->
    <postgresql_port>9005</postgresql_port>

    <!-- HTTP API with TLS (HTTPS).
         You have to configure a certificate to enable this interface.
         See the openSSL section below.
    -->
    <!-- <https_port>8443</https_port> -->

    <!-- Native interface with TLS.
         You have to configure a certificate to enable this interface.
         See the openSSL section below.
    -->
    <!-- <tcp_port_secure>9440</tcp_port_secure> -->

    <!-- Native interface wrapped with PROXYv1 protocol.
         A PROXYv1 header is sent for every connection.
         ClickHouse will extract information about the proxy-forwarded client address from the header.
    -->
    <!-- <tcp_with_proxy_port>9011</tcp_with_proxy_port> -->

    <!-- Port for communication between replicas. Used for data exchange.
         It provides low-level data access between servers.
         This port should not be accessible from untrusted networks.
         See also 'interserver_http_credentials'.
         Data transferred over connections to this port should not go through untrusted networks.
         See also 'interserver_https_port'.
    -->
    <interserver_http_port>9009</interserver_http_port>

    <!-- Port for communication between replicas with TLS.
         You have to configure a certificate to enable this interface.
         See the openSSL section below.
         See also 'interserver_http_credentials'.
    -->
    <!-- <interserver_https_port>9010</interserver_https_port> -->

    <!-- Hostname that is used by other replicas to request this server.
         If not specified, it is determined analogously to the 'hostname -f' command.
         This setting could be used to switch replication to another network interface
         (the server may be connected to multiple networks via multiple addresses).
    -->

    <!--
    <interserver_http_host>example.clickhouse.com</interserver_http_host>
    -->

    <!-- You can specify credentials for authentication between replicas.
         This is required when interserver_https_port is accessible from untrusted networks,
         and also recommended to avoid SSRF attacks from possibly compromised services in your network.
    -->
    <!--<interserver_http_credentials>
        <user>interserver</user>
        <password></password>
    </interserver_http_credentials>-->

    <!-- Listen on the specified address.
         Use :: (wildcard IPv6 address) if you want to accept connections both over IPv4 and IPv6 from everywhere.
         Notes:
         If you open connections from the wildcard address, make sure that at least one of the following measures is applied:
         - the server is protected by a firewall and not accessible from untrusted networks;
         - all users are restricted to a subset of network addresses (see users.xml);
         - all users have strong passwords, only secure (TLS) interfaces are accessible, or connections are only made via TLS interfaces.
         - users without a password have readonly access.
         See also: https://www.shodan.io/search?query=clickhouse
    -->
    <!-- <listen_host>::</listen_host> -->

    <!-- Same for hosts without support for IPv6: -->
    <!-- <listen_host>0.0.0.0</listen_host> -->

    <!-- Default values - try to listen on localhost on IPv4 and IPv6. -->
    <!--
    <listen_host>::1</listen_host>
    <listen_host>127.0.0.1</listen_host>
    -->

    <!-- <interserver_listen_host>::</interserver_listen_host> -->
    <!-- Listen host for communication between replicas. Used for data exchange -->
    <!-- Default values - equal to listen_host -->

    <!-- Don't exit if IPv6 or IPv4 networks are unavailable while trying to listen. -->
    <!-- <listen_try>0</listen_try> -->

    <!-- Allow multiple servers to listen on the same address:port. This is not recommended.
    -->
    <!-- <listen_reuse_port>0</listen_reuse_port> -->

    <!-- <listen_backlog>4096</listen_backlog> -->

    <max_connections>4096</max_connections>

    <!-- For 'Connection: keep-alive' in HTTP 1.1 -->
    <keep_alive_timeout>10</keep_alive_timeout>

    <!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) -->
    <!-- <grpc_port>9100</grpc_port> -->
    <grpc>
        <enable_ssl>false</enable_ssl>

        <!-- The following two files are used only if enable_ssl=1 -->
        <ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
        <ssl_key_file>/path/to/ssl_key_file</ssl_key_file>

        <!-- Whether the server will request the client for a certificate -->
        <ssl_require_client_auth>false</ssl_require_client_auth>

        <!-- The following file is used only if ssl_require_client_auth=1 -->
        <ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>

        <!-- Default transport compression type (can be overridden by the client, see the transport_compression_type field in QueryInfo).
             Supported algorithms: none, deflate, gzip, stream_gzip -->
        <transport_compression_type>none</transport_compression_type>

        <!-- Default transport compression level. Supported levels: 0..3 -->
        <transport_compression_level>0</transport_compression_level>

        <!-- Send/receive message size limits in bytes. -1 means unlimited -->
        <max_send_message_size>-1</max_send_message_size>
        <max_receive_message_size>-1</max_receive_message_size>

        <!-- Enable if you want very detailed logs -->
        <verbose_logs>false</verbose_logs>
    </grpc>

    <!-- Used with https_port and tcp_port_secure. Full list of SSL options: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
    <openSSL>
        <server> <!-- Used for https server AND secure tcp port -->
            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
            <!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
            <!-- dhparams are optional. You can delete the <dhParamsFile> element.
                 To generate dhparams, use the following command:
                   openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
                 Only the file format with BEGIN DH PARAMETERS is supported.
            -->
            <!-- <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>-->
            <verificationMode>none</verificationMode>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>

            <invalidCertificateHandler>
                <!-- The server, in contrast to the client, cannot ask about the certificate interactively.
                     The only reasonable option is to reject.
                -->
                <name>RejectCertificateHandler</name>
            </invalidCertificateHandler>
        </server>

        <client> <!-- Used for connecting to https dictionary sources and secured ZooKeeper communication -->
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
            <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
            <invalidCertificateHandler>
                <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
                <name>RejectCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <!-- Default root page on http[s] server. For example, load the UI from https://tabix.io/ when opening http://localhost:8123 -->
    <!--
    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
    -->

    <!-- The maximum number of query processing threads, excluding threads for retrieving data from remote servers, allowed to run all queries.
         This is not a hard limit. If the limit is reached, the query will still get at least one thread to run.
         A query can scale up to the desired number of threads during execution if more threads become available.
    -->
    <concurrent_threads_soft_limit_num>0</concurrent_threads_soft_limit_num>
    <concurrent_threads_soft_limit_ratio_to_cores>2</concurrent_threads_soft_limit_ratio_to_cores>

    <!-- Maximum number of concurrent queries. -->
    <max_concurrent_queries>1000</max_concurrent_queries>

    <!-- Maximum memory usage (resident set size) for the server process.
         Zero value or unset means the default. The default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
         If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down.

         The constraint is checked at query execution time.
         If a query tries to allocate memory and the current memory usage plus allocation is greater
         than the specified threshold, an exception will be thrown.

         It is not practical to set this constraint to small values like just a few gigabytes,
         because the memory allocator will keep this amount of memory in caches and the server will deny service to queries.
    -->
    <max_server_memory_usage>0</max_server_memory_usage>

    <!-- Maximum number of threads in the Global thread pool.
         This will default to a maximum of 10000 threads if not specified.
         This setting is useful in scenarios where there are a large number
         of distributed queries that are running concurrently but are idling most
         of the time, in which case a higher number of threads might be required.
    -->

    <max_thread_pool_size>10000</max_thread_pool_size>

    <!-- Configure other thread pools: -->
    <!--
    <background_buffer_flush_schedule_pool_size>16</background_buffer_flush_schedule_pool_size>
    <background_pool_size>16</background_pool_size>
    <background_merges_mutations_concurrency_ratio>2</background_merges_mutations_concurrency_ratio>
    <background_merges_mutations_scheduling_policy>round_robin</background_merges_mutations_scheduling_policy>
    <background_move_pool_size>8</background_move_pool_size>
    <background_fetches_pool_size>8</background_fetches_pool_size>
    <background_common_pool_size>8</background_common_pool_size>
    <background_schedule_pool_size>128</background_schedule_pool_size>
    <background_message_broker_schedule_pool_size>16</background_message_broker_schedule_pool_size>
    <background_distributed_schedule_pool_size>16</background_distributed_schedule_pool_size>
    <tables_loader_foreground_pool_size>0</tables_loader_foreground_pool_size>
    <tables_loader_background_pool_size>0</tables_loader_background_pool_size>
    -->

    <!-- Enables asynchronous loading of databases and tables to speed up server startup.
         Queries to a not-yet-loaded entity will block until loading is finished.
    -->
    <!-- <async_load_databases>true</async_load_databases> -->

    <!-- On memory constrained environments you may have to set this to a value larger than 1.
    -->
    <max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>

    <!-- Simple server-wide memory profiler. Collects a stack trace at every peak allocation step (in bytes).
         Data will be stored in the system.trace_log table with query_id = empty string.
         Zero means disabled.
    -->
    <total_memory_profiler_step>4194304</total_memory_profiler_step>

    <!-- Collect random allocations and deallocations and write them into system.trace_log with the 'MemorySample' trace_type.
         The probability applies to every alloc/free regardless of the size of the allocation.
         Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit,
         which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered.
         You may want to set 'total_memory_profiler_step' to 1 for extra fine-grained sampling.
    -->
    <total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability>

    <!-- Set a limit on the number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
         the correct maximum value. -->
    <!-- <max_open_files>262144</max_open_files> -->

    <!-- Size of the cache of uncompressed blocks of data, used in tables of the MergeTree family.
         In bytes. There is a single cache for the server. Memory is allocated only on demand.
         The cache is used when the 'use_uncompressed_cache' user setting is turned on (off by default).
         The uncompressed cache is advantageous only for very short queries and in rare cases.

         Note: the uncompressed cache can be pointless for lz4, because memory bandwidth
         is slower than multi-core decompression on some server configurations.
         Enabling it can sometimes paradoxically make queries slower.
    -->
    <uncompressed_cache_size>8589934592</uncompressed_cache_size>

    <!-- Approximate size of the mark cache, used in tables of the MergeTree family.
         In bytes. There is a single cache for the server. Memory is allocated only on demand.
         You should not lower this value.
    -->
    <mark_cache_size>5368709120</mark_cache_size>

    <!-- For marks of secondary indices.
    -->
    <index_mark_cache_size>5368709120</index_mark_cache_size>

    <!-- If you enable the `min_bytes_to_use_mmap_io` setting,
         the data in MergeTree tables can be read with mmap to avoid copying from kernel to userspace.
         It makes sense only for large files and helps only if data resides in the page cache.
         To avoid frequent open/mmap/munmap/close calls (which are very expensive due to consequent page faults)
         and to reuse mappings from several threads and queries,
         a cache of mapped files is maintained. Its size is the number of mapped regions (usually equal to the number of mapped files).
         The amount of data in mapped files can be monitored
         in system.metrics and system.metric_log by the MMappedFiles and MMappedFileBytes metrics,
         in system.asynchronous_metrics and system.asynchronous_metrics_log by the MMapCacheCells metric,
         and also in system.events, system.processes, system.query_log, system.query_thread_log, system.query_views_log by the
         CreatedReadBufferMMap, CreatedReadBufferMMapFailed, MMappedFileCacheHits, MMappedFileCacheMisses events.
         Note that the amount of data in mapped files does not consume memory directly and is not accounted for
         in query or server memory usage - because this memory can be discarded similarly to the OS page cache.
         The cache is dropped (the files are closed) automatically on removal of old parts in MergeTree;
         it can also be dropped manually by the SYSTEM DROP MMAP CACHE query.
    -->
    <mmap_cache_size>1000</mmap_cache_size>

    <!-- Cache size in bytes for compiled expressions.-->
    <compiled_expression_cache_size>134217728</compiled_expression_cache_size>

    <!-- Cache size in elements for compiled expressions.-->
    <compiled_expression_cache_elements_size>10000</compiled_expression_cache_elements_size>

    <!-- Cache path for custom (created from SQL) cached disks -->
    <custom_cached_disks_base_directory>/var/lib/clickhouse/caches/</custom_cached_disks_base_directory>

    <validate_tcp_client_information>false</validate_tcp_client_information>

    <!-- Path to the data directory, with trailing slash. -->
    <path>/var/lib/clickhouse/</path>

    <!-- Multi-disk configuration example: -->
    <!--
    <storage_configuration>
        <disks>
            <default>
                <keep_free_space_bytes>0</keep_free_space_bytes>
            </default>
            <data>
                <path>/data/</path>
                <keep_free_space_bytes>0</keep_free_space_bytes>
            </data>
            <s3>
                <type>s3</type>
                <endpoint>http://path/to/endpoint</endpoint>
                <access_key_id>your_access_key_id</access_key_id>
                <secret_access_key>your_secret_access_key</secret_access_key>
            </s3>
            <blob_storage_disk>
                <type>azure_blob_storage</type>
                <storage_account_url>http://account.blob.core.windows.net</storage_account_url>
                <container_name>container</container_name>
                <account_name>account</account_name>
                <account_key>pass123</account_key>
                <metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
                <skip_access_check>false</skip_access_check>
            </blob_storage_disk>
        </disks>

        <policies>
            <all>
                <volumes>
                    <main>
                        <disk>default</disk>
                        <disk>data</disk>
                        <disk>s3</disk>
                        <disk>blob_storage_disk</disk>

                        <max_data_part_size_bytes></max_data_part_size_bytes>
                        <max_data_part_size_ratio></max_data_part_size_ratio>
                        <perform_ttl_move_on_insert>true</perform_ttl_move_on_insert>
                        <load_balancing>round_robin</load_balancing>
                    </main>
                </volumes>
                <move_factor>0.2</move_factor>
            </all>
        </policies>
    </storage_configuration>
    -->

    <!-- Path to temporary data for processing hard queries. -->
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>

    <!-- Disable AuthType plaintext_password and no_password for ACL. -->
    <allow_plaintext_password>1</allow_plaintext_password>
    <allow_no_password>1</allow_no_password>
    <allow_implicit_no_password>1</allow_implicit_no_password>

    <!-- When a user does not specify a password type in the CREATE USER query, the default password type is used.
         Accepted values are: 'plaintext_password', 'sha256_password', 'double_sha1_password', 'bcrypt_password'.
    -->
    <default_password_type>sha256_password</default_password_type>

    <!-- Work factor for the bcrypt_password authentication type -->
    <bcrypt_workfactor>12</bcrypt_workfactor>

    <!-- Complexity requirements for user passwords. -->
    <!-- <password_complexity>
        <rule>
            <pattern>.{12}</pattern>
            <message>be at least 12 characters long</message>
        </rule>
        <rule>
            <pattern>\p{N}</pattern>
            <message>contain at least 1 numeric character</message>
        </rule>
        <rule>
            <pattern>\p{Ll}</pattern>
            <message>contain at least 1 lowercase character</message>
        </rule>
        <rule>
            <pattern>\p{Lu}</pattern>
            <message>contain at least 1 uppercase character</message>
        </rule>
        <rule>
            <pattern>[^\p{L}\p{N}]</pattern>
            <message>contain at least 1 special character</message>
        </rule>
    </password_complexity> -->

    <!-- Policy from <storage_configuration> for the temporary files.
         If not set, <tmp_path> is used; otherwise <tmp_path> is ignored.

         Notes:
         - move_factor is ignored
         - keep_free_space_bytes is ignored
         - max_data_part_size_bytes is ignored
         - you must have exactly one volume in that policy
    -->
    <!-- <tmp_policy>tmp</tmp_policy> -->

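    <!-- A minimal sketch (not part of the stock config; the disk name 'tmp_disk' and its path are made up) of a
         storage policy that satisfies the constraints above, i.e. exactly one volume:
    <storage_configuration>
        <disks>
            <tmp_disk>
                <path>/mnt/fast_ssd/clickhouse_tmp/</path>
            </tmp_disk>
        </disks>
        <policies>
            <tmp>
                <volumes>
                    <main>
                        <disk>tmp_disk</disk>
                    </main>
                </volumes>
            </tmp>
        </policies>
    </storage_configuration>
         With such a policy defined, <tmp_policy>tmp</tmp_policy> would direct temporary data to that disk. -->
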
    <!-- Directory with user-provided files that are accessible by the 'file' table function. -->
    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>

    <!-- LDAP server definitions. -->
    <ldap_servers>
        <!-- List LDAP servers with their connection parameters here to later 1) use them as authenticators for dedicated local users,
             who have the 'ldap' authentication mechanism specified instead of 'password', or to 2) use them as remote user directories.
             Parameters:
                host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
                port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
                bind_dn - template used to construct the DN to bind to.
                        The resulting DN will be constructed by replacing all '{user_name}' substrings of the template with the actual
                        user name during each authentication attempt.
                user_dn_detection - section with LDAP search parameters for detecting the actual user DN of the bound user.
                        This is mainly used in search filters for further role mapping when the server is Active Directory. The
                        resulting user DN will be used when replacing '{user_dn}' substrings wherever they are allowed. By default,
                        the user DN is set equal to the bind DN, but once the search is performed, it will be updated to the actual
                        detected user DN value.
                base_dn - template used to construct the base DN for the LDAP search.
                        The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings
                        of the template with the actual user name and bind DN during the LDAP search.
                scope - scope of the LDAP search.
                        Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
                search_filter - template used to construct the search filter for the LDAP search.
                        The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}'
                        substrings of the template with the actual user name, bind DN, and base DN during the LDAP search.
                        Note that special characters must be escaped properly in XML.
                verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed
                        to be successfully authenticated for all consecutive requests without contacting the LDAP server.
                        Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request.
                enable_tls - flag to trigger the use of a secure connection to the LDAP server.
                        Specify 'no' for plain text (ldap://) protocol (not recommended).
                        Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default).
                        Specify 'starttls' for the legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS).
                tls_minimum_protocol_version - the minimum protocol version of SSL/TLS.
                        Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default).
                tls_require_cert - SSL/TLS peer certificate verification behavior.
                        Accepted values are: 'never', 'allow', 'try', 'demand' (the default).
                tls_cert_file - path to certificate file.
                tls_key_file - path to certificate key file.
                tls_ca_cert_file - path to CA certificate file.
                tls_ca_cert_dir - path to the directory containing CA certificates.
                tls_cipher_suite - allowed cipher suite (in OpenSSL notation).
             Example:
                <my_ldap_server>
                    <host>localhost</host>
                    <port>636</port>
                    <bind_dn>uid={user_name},ou=users,dc=example,dc=com</bind_dn>
                    <verification_cooldown>300</verification_cooldown>
                    <enable_tls>yes</enable_tls>
                    <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
                    <tls_require_cert>demand</tls_require_cert>
                    <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
                    <tls_key_file>/path/to/tls_key_file</tls_key_file>
                    <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
                    <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
                    <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
                </my_ldap_server>
             Example (typical Active Directory with configured user DN detection for further role mapping):
                <my_ad_server>
                    <host>localhost</host>
                    <port>389</port>
                    <bind_dn>EXAMPLE\{user_name}</bind_dn>
                    <user_dn_detection>
                        <base_dn>CN=Users,DC=example,DC=com</base_dn>
                        <search_filter>(&(objectClass=user)(sAMAccountName={user_name}))</search_filter>
                    </user_dn_detection>
                    <enable_tls>no</enable_tls>
                </my_ad_server>
        -->
    </ldap_servers>

    <!-- To enable Kerberos authentication support for HTTP requests (GSS-SPNEGO), for those users who are explicitly configured
         to authenticate via Kerberos, define a single 'kerberos' section here.
         Parameters:
            principal - canonical service principal name that will be acquired and used when accepting security contexts.
                    This parameter is optional; if omitted, the default principal will be used.
                    This parameter cannot be specified together with the 'realm' parameter.
            realm - a realm that will be used to restrict authentication to only those requests whose initiator's realm matches it.
                    This parameter is optional; if omitted, no additional filtering by realm will be applied.
                    This parameter cannot be specified together with the 'principal' parameter.
         Example:
            <kerberos />
         Example:
            <kerberos>
                <principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
            </kerberos>
         Example:
            <kerberos>
                <realm>EXAMPLE.COM</realm>
            </kerberos>
    -->

    <!-- Sources to read users, roles, access rights, profiles of settings, quotas. -->
    <user_directories>
        <users_xml>
            <!-- Path to the configuration file with predefined users. -->
            <path>users.xml</path>
        </users_xml>
        <local_directory>
            <!-- Path to the folder where users created by SQL commands are stored. -->
            <path>/var/lib/clickhouse/access/</path>
        </local_directory>

        <!-- To add an LDAP server as a remote user directory of users that are not defined locally, define a single 'ldap' section
             with the following parameters:
                server - one of the LDAP server names defined in the 'ldap_servers' config section above.
                        This parameter is mandatory and cannot be empty.
                roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
                        If no roles are specified here or assigned during role mapping (below), the user will not be able to perform any
                        actions after authentication.
                role_mapping - section with LDAP search parameters and mapping rules.
                        When a user authenticates, while still bound to LDAP, an LDAP search is performed using search_filter and the
                        name of the logged-in user. For each entry found during that search, the value of the specified attribute is
                        extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the
                        value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the
                        CREATE ROLE command.
                        There can be multiple 'role_mapping' sections defined inside the same 'ldap' section. All of them will be
                        applied.
                    base_dn - template used to construct the base DN for the LDAP search.
                            The resulting DN will be constructed by replacing all '{user_name}', '{bind_dn}', and '{user_dn}'
                            substrings of the template with the actual user name, bind DN, and user DN during each LDAP search.
                    scope - scope of the LDAP search.
                            Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
                    search_filter - template used to construct the search filter for the LDAP search.
                            The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', '{user_dn}', and
                            '{base_dn}' substrings of the template with the actual user name, bind DN, user DN, and base DN during
                            each LDAP search.
                            Note that special characters must be escaped properly in XML.
                    attribute - attribute name whose values will be returned by the LDAP search. 'cn', by default.
                    prefix - prefix that will be expected to be in front of each string in the original list of strings returned by
                            the LDAP search. The prefix will be removed from the original strings and the resulting strings will be
                            treated as local role names. Empty, by default.
             Example:
                <ldap>
                    <server>my_ldap_server</server>
                    <roles>
                        <my_local_role1 />
                        <my_local_role2 />
                    </roles>
                    <role_mapping>
                        <base_dn>ou=groups,dc=example,dc=com</base_dn>
                        <scope>subtree</scope>
                        <search_filter>(&(objectClass=groupOfNames)(member={bind_dn}))</search_filter>
                        <attribute>cn</attribute>
                        <prefix>clickhouse_</prefix>
                    </role_mapping>
                </ldap>
             Example (typical Active Directory with role mapping that relies on the detected user DN):
                <ldap>
                    <server>my_ad_server</server>
                    <role_mapping>
                        <base_dn>CN=Users,DC=example,DC=com</base_dn>
                        <attribute>CN</attribute>
                        <scope>subtree</scope>
                        <search_filter>(&(objectClass=group)(member={user_dn}))</search_filter>
                        <prefix>clickhouse_</prefix>
                    </role_mapping>
                </ldap>
        -->
    </user_directories>

    <access_control_improvements>
        <!-- Enables logic whereby users without permissive row policies can still read rows using a SELECT query.
             For example, if there are two users, A and B, and a row policy is defined only for A, then
             if this setting is true, user B will see all rows, and if this setting is false, user B will see no rows.
             By default this setting is false for compatibility with earlier access configurations. -->
        <users_without_row_policies_can_read_rows>true</users_without_row_policies_can_read_rows>

        <!-- By default, for backward compatibility, ON CLUSTER queries ignore the CLUSTER grant;
             however, you can change this behaviour by setting this to true -->
        <on_cluster_queries_require_cluster_grant>true</on_cluster_queries_require_cluster_grant>

        <!-- By default, for backward compatibility, "SELECT * FROM system.<table>" doesn't require any grants and can be executed
             by any user. You can change this behaviour by setting this to true.
             If it's set to true then this query requires "GRANT SELECT ON system.<table>" just as for non-system tables.
             Exceptions: a few system tables ("tables", "columns", "databases", and some constant tables like "one", "contributors")
             are still accessible for everyone; and if there is a SHOW privilege (e.g. "SHOW USERS") granted, the corresponding system
             table (i.e. "system.users") will be accessible. -->
        <select_from_system_db_requires_grant>true</select_from_system_db_requires_grant>

        <!-- By default, for backward compatibility, "SELECT * FROM information_schema.<table>" doesn't require any grants and can be
             executed by any user. You can change this behaviour by setting this to true.
             If it's set to true then this query requires "GRANT SELECT ON information_schema.<table>" just as for ordinary tables. -->
        <select_from_information_schema_requires_grant>true</select_from_information_schema_requires_grant>

        <!-- By default, for backward compatibility, a settings profile constraint for a specific setting inherits every unset field from
             the previous profile. You can change this behaviour by setting this to true.
             If it's set to true and a settings profile has a constraint for a specific setting, then this constraint completely cancels all
             actions of the previous constraint (defined in other profiles) for the same setting, including fields that are not set by the new constraint.
             It also enables the 'changeable_in_readonly' constraint type -->
        <settings_constraints_replace_previous>true</settings_constraints_replace_previous>

        <!-- Number of seconds since last access that a role is stored in the Role Cache -->
        <role_cache_expiration_time_seconds>600</role_cache_expiration_time_seconds>
    </access_control_improvements>

    <!-- Default profile of settings. -->
    <default_profile>default</default_profile>

    <!-- Comma-separated list of prefixes for user-defined settings.
         The server will allow setting these settings and retrieving them with the getSetting function.
         They are also logged in the query_log, similarly to other settings, but have no special effect.
         The "SQL_" prefix is introduced for compatibility with MySQL - these settings are being set by Tableau.
    -->
    <custom_settings_prefixes>SQL_</custom_settings_prefixes>

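    <!-- A usage sketch (the setting name 'SQL_custom_marker' is made up; only the prefix is significant):
         with the "SQL_" prefix above, a client session can do something like
             SET SQL_custom_marker = 'abc';
             SELECT getSetting('SQL_custom_marker');
         The value has no special effect but is logged in query_log alongside other settings. -->
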
    <!-- System profile of settings. These settings are used by internal processes (the Distributed DDL worker and so on). -->
    <!-- <system_profile>default</system_profile> -->

    <!-- Buffer profile of settings.
         These settings are used by the Buffer storage to flush data to the underlying table.
         Default: taken from the system_profile directive.
    -->
    <!-- <buffer_profile>default</buffer_profile> -->

    <!-- Default database. -->
    <default_database>default</default_database>

    <!-- Server time zone could be set here.

         The time zone is used when converting between String and DateTime types,
         when printing DateTime in text formats and parsing DateTime from text;
         it is used in date- and time-related functions if a specific time zone was not passed as an argument.

         The time zone is specified as an identifier from the IANA time zone database, like UTC or Africa/Abidjan.
         If not specified, the system time zone at server startup is used.

         Please note that the server could display a time zone alias instead of the specified name.
         Example: Zulu is an alias for UTC.
    -->
    <!-- <timezone>UTC</timezone> -->

    <!-- You can specify the umask here (see "man umask"). The server will apply it on startup.
         The number is always parsed as octal. The default umask is 027 (other users cannot read logs, data files, etc; the group can only read).
    -->
    <!-- <umask>022</umask> -->

    <!-- Perform mlockall after startup to lower first-query latency
         and to prevent the clickhouse executable from being paged out under high IO load.
         Enabling this option is recommended but will lead to increased startup time of up to a few seconds.
    -->
    <mlock_executable>true</mlock_executable>

    <!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. -->
    <remap_executable>false</remap_executable>

    <![CDATA[
         Uncomment below in order to use the JDBC table engine and function.

         To install and run the JDBC bridge in the background:
         * [Debian/Ubuntu]
           export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
           export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
           wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
           apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
           clickhouse-jdbc-bridge &

         * [CentOS/RHEL]
           export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
           export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
           wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
           yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
           clickhouse-jdbc-bridge &

         Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
    ]]>
    <!--
    <jdbc_bridge>
        <host>127.0.0.1</host>
        <port>9019</port>
    </jdbc_bridge>
    -->

    <!-- Configuration of clusters that could be used in Distributed tables.
         https://clickhouse.com/docs/en/operations/table_engines/distributed/
    -->
    <remote_servers>
        <!-- Test-only shard config for testing distributed storage -->
        <default>
            <!-- Inter-server per-cluster secret for Distributed queries
                 default: no secret (no authentication will be performed)

                 If set, then Distributed queries will be validated on shards, so at least:
                 - such a cluster should exist on the shard,
                 - such a cluster should have the same secret.

                 And also (and what is more important), the initial_user will
                 be used as the current user for the query.

                 Right now the protocol is pretty simple, and it only takes into account:
                 - cluster name
                 - query

                 Also, it would be nice if the following were implemented:
                 - source hostname (see interserver_http_host), but then it will depend on DNS;
                   it can use the IP address instead, but then you need to get it correct on the initiator node.
                 - target hostname / ip address (same notes as for source hostname)
                 - time-based security tokens
            -->
            <!-- <secret></secret> -->

            <shard>
                <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
                <!-- <internal_replication>false</internal_replication> -->
                <!-- Optional. Shard weight when writing data. Default: 1. -->
                <!-- <weight>1</weight> -->
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                    <!-- Optional. Priority of the replica for load_balancing. Default: 1 (a lower value has higher priority). -->
                    <!-- <priority>1</priority> -->
                    <!-- Use SSL? Default: no -->
                    <!-- <secure>0</secure> -->
                </replica>
            </shard>
        </default>
    </remote_servers>

    <!-- The list of hosts allowed for use in URL-related storage engines and table functions.
         If this section is not present in the configuration, all hosts are allowed.
    -->
    <!--<remote_url_allow_hosts>-->
    <!-- The host should be specified exactly as in the URL. The name is checked before DNS resolution.
         Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
         If a port is explicitly specified in the URL, the host:port is checked as a whole.
         If a host is specified here without a port, any port for this host is allowed.
         "clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. are allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
         If the host is specified as an IP address, it is checked as specified in the URL. Example: "[2a02:6b8:a::a]".
         If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
         The host should be specified using the host xml tag:
             <host>clickhouse.com</host>
    -->

    <!-- A regular expression can be specified. The RE2 engine is used for regexps.
         Regexps are not anchored: don't forget to add ^ and $. Also don't forget to escape the dot (.) metacharacter
         (forgetting to do so is a common source of error).
    -->
    <!--</remote_url_allow_hosts>-->

    <!-- The list of HTTP headers forbidden for use in HTTP-related storage engines and table functions.
         If this section is not present in the configuration, all headers are allowed.
    -->
    <!-- <http_forbid_headers>
        <header>exact_header</header>
        <header_regexp>(?i)(case_insensitive_header)</header_regexp>
    </http_forbid_headers> -->

    <!-- If an element has an 'incl' attribute, the corresponding substitution from another file will be used as its value.
         By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed in the config in the 'include_from' element.
         Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
    -->

    <!-- ZooKeeper is used to store metadata about replicas when using Replicated tables.
         Optional. If you don't use replicated tables, you can omit this.

         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
    -->

    <!--
    <zookeeper>
        <node>
            <host>example1</host>
            <port>2181</port>
        </node>
        <node>
            <host>example2</host>
            <port>2181</port>
        </node>
        <node>
            <host>example3</host>
            <port>2181</port>
        </node>
    </zookeeper>
    -->

    <!-- Substitutions for parameters of replicated tables.
         Optional. If you don't use replicated tables, you can omit this.

         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
    -->
    <!--
    <macros>
        <shard>01</shard>
        <replica>example01-01-1</replica>
    </macros>
    -->

    <!--
    <default_replica_path>/clickhouse/tables/{database}/{table}</default_replica_path>
    <default_replica_name>{replica}</default_replica_name>
    -->
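
    <!-- Illustration (a hedged sketch; the database/table names and column list are made up): with the macros and the
         default_replica_path / default_replica_name settings above uncommented, a statement such as
             CREATE TABLE db.events (event_date Date, id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id;
         would use the ZooKeeper path /clickhouse/tables/db/events and the replica name taken from the {replica} macro.
         Explicit arguments to ReplicatedMergeTree('...', '...') override these defaults. -->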
944
945<!-- Replica group name for database Replicated.946The cluster created by Replicated database will consist of replicas in the same group.
947DDL queries will only wail for the replicas in the same group.
948Empty by default.
949-->
950<!--951<replica_group_name><replica_group_name>
952-->
953
954
955<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->956<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>957
958
959<!-- Maximum session timeout, in seconds. Default: 3600. -->960<max_session_timeout>3600</max_session_timeout>961
962<!-- Default session timeout, in seconds. Default: 60. -->963<default_session_timeout>60</default_session_timeout>964
965<!-- Sending data to Graphite for monitoring. Several sections can be defined. -->966<!--967interval - send every X second
968root_path - prefix for keys
969hostname_in_path - append hostname to root_path (default = true)
970metrics - send data from table system.metrics
971events - send data from table system.events
972asynchronous_metrics - send data from table system.asynchronous_metrics
973-->
974<!--975<graphite>
976<host>localhost</host>
977<port>42000</port>
978<timeout>0.1</timeout>
979<interval>60</interval>
980<root_path>one_min</root_path>
981<hostname_in_path>true</hostname_in_path>
982
983<metrics>true</metrics>
984<events>true</events>
985<events_cumulative>false</events_cumulative>
986<asynchronous_metrics>true</asynchronous_metrics>
987</graphite>
988<graphite>
989<host>localhost</host>
990<port>42000</port>
991<timeout>0.1</timeout>
992<interval>1</interval>
993<root_path>one_sec</root_path>
994
995<metrics>true</metrics>
996<events>true</events>
997<events_cumulative>false</events_cumulative>
998<asynchronous_metrics>false</asynchronous_metrics>
999</graphite>
1000-->
1001
1002<!-- Serve endpoint for Prometheus monitoring. -->1003<!--1004endpoint - mertics path (relative to root, statring with "/")
1005port - port to setup server. If not defined or 0 than http_port used
1006metrics - send data from table system.metrics
1007events - send data from table system.events
1008asynchronous_metrics - send data from table system.asynchronous_metrics
1009-->
1010<!--1011<prometheus>
1012<endpoint>/metrics</endpoint>
1013<port>9363</port>
1014
1015<metrics>true</metrics>
1016<events>true</events>
1017<asynchronous_metrics>true</asynchronous_metrics>
1018</prometheus>
1019-->
1020
1021<!-- Query log. Used only for queries with setting log_queries = 1. -->1022<query_log>1023<!-- What table to insert data. If table is not exist, it will be created.1024When query log structure is changed after system update,
1025then old table will be renamed and new table will be created automatically.
1026-->
1027<database>system</database>1028<table>query_log</table>1029<!--1030PARTITION BY expr: https://clickhouse.com/docs/en/table_engines/mergetree-family/custom_partitioning_key/
1031Example:
1032event_date
1033toMonday(event_date)
1034toYYYYMM(event_date)
1035toStartOfHour(event_time)
1036-->
1037<partition_by>toYYYYMM(event_date)</partition_by>1038<!--1039Table TTL specification: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl
1040Example:
1041event_date + INTERVAL 1 WEEK
1042event_date + INTERVAL 7 DAY DELETE
1043event_date + INTERVAL 2 WEEK TO DISK 'bbb'
1044
1045<ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
1046-->
1047
1048<!--1049ORDER BY expr: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#order_by
1050Example:
1051event_date, event_time
1052event_date, type, query_id
1053event_date, event_time, initial_query_id
1054
1055<order_by>event_date, event_time, initial_query_id</order_by>
1056-->
1057
1058<!-- Instead of partition_by, you can provide full engine expression (starting with ENGINE = ) with parameters,1059Example: <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
1060-->
1061
1062<!-- Interval of flushing data. -->1063<flush_interval_milliseconds>7500</flush_interval_milliseconds>1064<!-- Maximal size in lines for the logs. When non-flushed logs amount reaches max_size, logs dumped to the disk. -->1065<max_size_rows>1048576</max_size_rows>1066<!-- Pre-allocated size in lines for the logs. -->1067<reserved_size_rows>8192</reserved_size_rows>1068<!-- Lines amount threshold, reaching it launches flushing logs to the disk in background. -->1069<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1070<!-- Indication whether logs should be dumped to the disk in case of a crash -->1071<flush_on_crash>false</flush_on_crash>1072
1073<!-- example of using a different storage policy for a system table -->1074<!-- storage_policy>local_ssd</storage_policy -->1075</query_log>1076
1077<!-- Trace log. Stores stack traces collected by query profilers.1078See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
1079<trace_log>1080<database>system</database>1081<table>trace_log</table>1082
1083<partition_by>toYYYYMM(event_date)</partition_by>1084<flush_interval_milliseconds>7500</flush_interval_milliseconds>1085<max_size_rows>1048576</max_size_rows>1086<reserved_size_rows>8192</reserved_size_rows>1087<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1088<!-- Indication whether logs should be dumped to the disk in case of a crash -->1089<flush_on_crash>false</flush_on_crash>1090</trace_log>1091
1092<!-- Query thread log. Has information about all threads participated in query execution.1093Used only for queries with setting log_query_threads = 1. -->
1094<query_thread_log>1095<database>system</database>1096<table>query_thread_log</table>1097<partition_by>toYYYYMM(event_date)</partition_by>1098<flush_interval_milliseconds>7500</flush_interval_milliseconds>1099<max_size_rows>1048576</max_size_rows>1100<reserved_size_rows>8192</reserved_size_rows>1101<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1102<flush_on_crash>false</flush_on_crash>1103</query_thread_log>1104
1105<!-- Query views log. Has information about all dependent views associated with a query.1106Used only for queries with setting log_query_views = 1. -->
1107<query_views_log>1108<database>system</database>1109<table>query_views_log</table>1110<partition_by>toYYYYMM(event_date)</partition_by>1111<flush_interval_milliseconds>7500</flush_interval_milliseconds>1112</query_views_log>1113
1114<!-- Uncomment if use part log.1115Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).-->
1116<part_log>1117<database>system</database>1118<table>part_log</table>1119<partition_by>toYYYYMM(event_date)</partition_by>1120<flush_interval_milliseconds>7500</flush_interval_milliseconds>1121<max_size_rows>1048576</max_size_rows>1122<reserved_size_rows>8192</reserved_size_rows>1123<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1124<flush_on_crash>false</flush_on_crash>1125</part_log>1126
1127<!-- Uncomment to write text log into table.1128Text log contains all information from usual server log but stores it in structured and efficient way.
1129The level of the messages that goes to the table can be limited (<level>), if not specified all messages will go to the table.
1130<text_log>
1131<database>system</database>
1132<table>text_log</table>
1133<flush_interval_milliseconds>7500</flush_interval_milliseconds>
1134<max_size_rows>1048576</max_size_rows>
1135<reserved_size_rows>8192</reserved_size_rows>
1136<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1137<flush_on_crash>false</flush_on_crash>
1138<level></level>
1139</text_log>
1140-->
1141
1142<!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->1143<metric_log>1144<database>system</database>1145<table>metric_log</table>1146<flush_interval_milliseconds>7500</flush_interval_milliseconds>1147<max_size_rows>1048576</max_size_rows>1148<reserved_size_rows>8192</reserved_size_rows>1149<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1150<collect_interval_milliseconds>1000</collect_interval_milliseconds>1151<flush_on_crash>false</flush_on_crash>1152</metric_log>1153
1154<!--1155Asynchronous metric log contains values of metrics from
1156system.asynchronous_metrics.
1157-->
1158<asynchronous_metric_log>1159<database>system</database>1160<table>asynchronous_metric_log</table>1161<flush_interval_milliseconds>7000</flush_interval_milliseconds>1162<max_size_rows>1048576</max_size_rows>1163<reserved_size_rows>8192</reserved_size_rows>1164<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1165<flush_on_crash>false</flush_on_crash>1166</asynchronous_metric_log>1167
1168<!--1169OpenTelemetry log contains OpenTelemetry trace spans.
1170-->
1171<opentelemetry_span_log>1172<!--1173The default table creation code is insufficient, this <engine> spec
1174is a workaround. There is no 'event_time' for this log, but two times,
1175start and finish. It is sorted by finish time, to avoid inserting
1176data too far away in the past (probably we can sometimes insert a span
1177that is seconds earlier than the last span in the table, due to a race
1178between several spans inserted in parallel). This gives the spans a
1179global order that we can use to e.g. retry insertion into some external
1180system.
1181-->
1182<engine>1183engine MergeTree
1184partition by toYYYYMM(finish_date)
1185order by (finish_date, finish_time_us, trace_id)
1186</engine>1187<database>system</database>1188<table>opentelemetry_span_log</table>1189<flush_interval_milliseconds>7500</flush_interval_milliseconds>1190<max_size_rows>1048576</max_size_rows>1191<reserved_size_rows>8192</reserved_size_rows>1192<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1193<flush_on_crash>false</flush_on_crash>1194</opentelemetry_span_log>1195
1196
1197<!-- Crash log. Stores stack traces for fatal errors.1198This table is normally empty. -->
1199<crash_log>1200<database>system</database>1201<table>crash_log</table>1202
1203<partition_by />1204<flush_interval_milliseconds>1000</flush_interval_milliseconds>1205<max_size_rows>1024</max_size_rows>1206<reserved_size_rows>1024</reserved_size_rows>1207<buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>1208<flush_on_crash>true</flush_on_crash>1209</crash_log>1210
1211<!-- Session log. Stores user log in (successful or not) and log out events.1212
1213Note: session log has known security issues and should not be used in production.
1214-->
1215<!-- <session_log>1216<database>system</database>
1217<table>session_log</table>
1218
1219<partition_by>toYYYYMM(event_date)</partition_by>
1220<flush_interval_milliseconds>7500</flush_interval_milliseconds>
1221<max_size_rows>1048576</max_size_rows>
1222<reserved_size_rows>8192</reserved_size_rows>
1223<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
1224<flush_on_crash>false</flush_on_crash>
1225</session_log> -->
1226
1227<!-- Profiling on Processors level. -->1228<processors_profile_log>1229<database>system</database>1230<table>processors_profile_log</table>1231
1232<partition_by>toYYYYMM(event_date)</partition_by>1233<flush_interval_milliseconds>7500</flush_interval_milliseconds>1234<max_size_rows>1048576</max_size_rows>1235<reserved_size_rows>8192</reserved_size_rows>1236<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1237<flush_on_crash>false</flush_on_crash>1238</processors_profile_log>1239
1240<!-- Log of asynchronous inserts. It allows to check status1241of insert query in fire-and-forget mode.
1242-->
1243<asynchronous_insert_log>1244<database>system</database>1245<table>asynchronous_insert_log</table>1246
1247<flush_interval_milliseconds>7500</flush_interval_milliseconds>1248<max_size_rows>1048576</max_size_rows>1249<reserved_size_rows>8192</reserved_size_rows>1250<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>1251<flush_on_crash>false</flush_on_crash>1252<partition_by>event_date</partition_by>1253<ttl>event_date + INTERVAL 3 DAY</ttl>1254</asynchronous_insert_log>1255
1256<!-- Backup/restore log.1257-->
1258<backup_log>1259<database>system</database>1260<table>backup_log</table>1261<partition_by>toYYYYMM(event_date)</partition_by>1262<flush_interval_milliseconds>7500</flush_interval_milliseconds>1263</backup_log>1264
1265<!-- Storage S3Queue log.1266-->
1267<s3queue_log>1268<database>system</database>1269<table>s3queue_log</table>1270<partition_by>toYYYYMM(event_date)</partition_by>1271<flush_interval_milliseconds>7500</flush_interval_milliseconds>1272</s3queue_log>1273
1274<!-- Blob storage object operations log.1275-->
1276<blob_storage_log>1277<database>system</database>1278<table>blob_storage_log</table>1279<partition_by>toYYYYMM(event_date)</partition_by>1280<flush_interval_milliseconds>7500</flush_interval_milliseconds>1281<ttl>event_date + INTERVAL 30 DAY</ttl>1282</blob_storage_log>1283
    <!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
    <!-- Custom TLD lists.
         Format: <name>/path/to/file</name>

         Changes will not be applied without a server restart.
         The path to each list is relative to top_level_domains_path (see above).
    -->
    <top_level_domains_lists>
        <!--
        <public_suffix_list>/path/to/public_suffix_list.dat</public_suffix_list>
        -->
    </top_level_domains_lists>

    <!-- Configuration of external dictionaries. See:
         https://clickhouse.com/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts
    -->
    <dictionaries_config>*_dictionary.*ml</dictionaries_config>
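
    <!-- For illustration only: a minimal sketch of a separate dictionary file that would match the
         "*_dictionary.*ml" pattern above (for example /etc/clickhouse-server/country_dictionary.xml).
         The file name, source table, and attribute names here are hypothetical and not part of this
         configuration; see the documentation link above for the full set of sources, layouts and options.

    <clickhouse>
        <dictionary>
            <name>country</name>
            <source>
                <clickhouse>
                    <host>localhost</host>
                    <port>9000</port>
                    <user>default</user>
                    <db>default</db>
                    <table>countries</table>
                </clickhouse>
            </source>
            <layout>
                <flat/>
            </layout>
            <structure>
                <id>
                    <name>id</name>
                </id>
                <attribute>
                    <name>name</name>
                    <type>String</type>
                    <null_value></null_value>
                </attribute>
            </structure>
            <lifetime>300</lifetime>
        </dictionary>
    </clickhouse>
    -->
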
    <!-- Load dictionaries lazily, i.e. a dictionary will be loaded when it is used for the first time.
         "false" means ClickHouse will start loading dictionaries immediately at startup.
    -->
    <dictionaries_lazy_load>true</dictionaries_lazy_load>

    <!-- Wait at startup until all the dictionaries finish loading (successfully or not)
         before accepting any connections. Affects dictionaries only if "dictionaries_lazy_load" is false.
         Setting this to false can make ClickHouse start faster, but some queries may then run more slowly.
    -->
    <wait_dictionaries_load_at_startup>true</wait_dictionaries_load_at_startup>

    <!-- Configuration of user defined executable functions -->
    <user_defined_executable_functions_config>*_function.*ml</user_defined_executable_functions_config>
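
    <!-- For illustration only: a minimal sketch of a separate function file that would match the
         "*_function.*ml" pattern above (for example /etc/clickhouse-server/greet_function.xml).
         The function name "greet" and the script "greet.sh" are hypothetical; by default the script
         is looked up in the server's user_scripts directory and is expected to read rows in the
         chosen format from stdin and write one result per row to stdout.

    <functions>
        <function>
            <type>executable</type>
            <name>greet</name>
            <return_type>String</return_type>
            <argument>
                <type>String</type>
                <name>value</name>
            </argument>
            <format>TabSeparated</format>
            <command>greet.sh</command>
        </function>
    </functions>
    -->
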
    <!-- Path in ZooKeeper to store user-defined SQL functions created by the command CREATE FUNCTION.
         If not specified they will be stored locally. -->
    <!-- <user_defined_zookeeper_path>/clickhouse/user_defined</user_defined_zookeeper_path> -->

    <!-- Uncomment if you want data to be compressed 30-100% better.
         Don't do that if you just started using ClickHouse.
    -->
    <!--
    <compression>
        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
        <case>

            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
            <min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
            <min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->

            <!- - What compression method to use. - ->
            <method>zstd</method>
        </case>
    </compression>
    -->

    <!-- Configuration of encryption. The server executes a command to
         obtain an encryption key at startup if such a command is
         defined, or encryption codecs will be disabled otherwise. The
         command is executed through /bin/sh and is expected to write
         a Base64-encoded key to stdout. -->
    <encryption_codecs>
        <!-- aes_128_gcm_siv -->
            <!-- Example of getting a hex key from an environment variable -->
            <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
            <!--key_hex from_env="..."></key_hex -->

            <!-- Example of multiple hex keys. They can be imported from env or be written down in the config -->
            <!-- the code should use these keys and throw an exception if their length is not 16 bytes -->
            <!-- key_hex id="0">...</key_hex -->
            <!-- key_hex id="1" from_env=".."></key_hex -->
            <!-- key_hex id="2">...</key_hex -->
            <!-- current_key_id>2</current_key_id -->

            <!-- Example of getting a hex key from the config -->
            <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
            <!-- key>...</key -->

            <!-- example of adding a nonce -->
            <!-- nonce>...</nonce -->

        <!-- /aes_128_gcm_siv -->
    </encryption_codecs>

    <!-- Allow executing distributed DDL queries (CREATE, DROP, ALTER, RENAME) on a cluster.
         Works only if ZooKeeper is enabled. Comment it out if such functionality isn't required. -->
    <distributed_ddl>
        <!-- Path in ZooKeeper to the queue with DDL queries -->
        <path>/clickhouse/task_queue/ddl</path>

        <!-- Settings from this profile will be used to execute DDL queries -->
        <!-- <profile>default</profile> -->

        <!-- Controls how many ON CLUSTER queries can run simultaneously. -->
        <!-- <pool_size>1</pool_size> -->

        <!--
             Cleanup settings (active tasks will not be removed)
        -->

        <!-- Controls task TTL (default 1 week) -->
        <!-- <task_max_lifetime>604800</task_max_lifetime> -->

        <!-- Controls how often cleanup should be performed (in seconds) -->
        <!-- <cleanup_delay_period>60</cleanup_delay_period> -->

        <!-- Controls how many tasks can be in the queue -->
        <!-- <max_tasks_in_queue>1000</max_tasks_in_queue> -->

        <!-- Host name of the current node. If specified, hostnames inside the DDL tasks will only be compared, not resolved -->
        <!-- <host_name>replica</host_name> -->
    </distributed_ddl>

    <!-- Settings to fine-tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
    <!--
    <merge_tree>
        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
    </merge_tree>
    -->

    <!-- Settings to fine-tune ReplicatedMergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
    <!--
    <replicated_merge_tree>
        <max_replicated_fetches_network_bandwidth>1000000000</max_replicated_fetches_network_bandwidth>
    </replicated_merge_tree>
    -->

    <!-- Settings to fine-tune Distributed tables. See documentation in source code, in DistributedSettings.h -->
    <!--
    <distributed>
        <flush_on_detach>false</flush_on_detach>
    </distributed>
    -->

    <!-- Protection from accidental DROP.
         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
         If you want to delete one table and don't want to change the clickhouse-server config, you can create the special file <clickhouse-path>/flags/force_drop_table and run DROP once.
         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
         The same applies to max_partition_size_to_drop.
         Uncomment to disable protection.
    -->
    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
    <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->

    <!-- Example of parameters for GraphiteMergeTree table engine -->
    <graphite_rollup_example>
        <pattern>
            <regexp>click_cost</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>60</precision>
            </retention>
        </pattern>
        <default>
            <function>max</function>
            <retention>
                <age>0</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>3600</age>
                <precision>300</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>3600</precision>
            </retention>
        </default>
    </graphite_rollup_example>

    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
         The directory will be created if it doesn't exist.
    -->
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>

    <!-- Directory containing the proto files for the well-known Protobuf types. -->
    <google_protos_path>/usr/share/clickhouse/protos/</google_protos_path>

    <!-- Default query masking rules: matching lines will be replaced with something else in the logs
         (both text logs and system.query_log).
         name    - name for the rule (optional)
         regexp  - RE2-compatible regular expression (mandatory)
         replace - substitution string for sensitive data (optional, by default - six asterisks)
    <query_masking_rules>
        <rule>
            <name>hide encrypt/decrypt arguments</name>
            <regexp>((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\)</regexp>
            <replace>\1(???)</replace>
        </rule>
    </query_masking_rules> -->

    <!-- Uncomment to use custom HTTP handlers.

         Rules are checked from top to bottom; the first match runs the handler.
             url - to match the request URL, you can use the 'regex:' prefix for regex matching (optional)
             empty_query_string - check that there is no query string in the URL
             methods - to match the request method, you can use commas to separate multiple method matches (optional)
             headers - to match the request headers, match each child element (the child element name is the header name), you can use the 'regex:' prefix for regex matching (optional)

         handler is the request handler
             type - supported types: static, dynamic_query_handler, predefined_query_handler, redirect
             query - use with the predefined_query_handler type, executes the query when the handler is called
             query_param_name - use with the dynamic_query_handler type, extracts and executes the value corresponding to the <query_param_name> value in the HTTP request params
             status - use with the static type, response status code
             content_type - use with the static type, response content-type
             response_content - use with the static type, response content sent to the client; when using the prefix 'file://' or 'config://', the content is read from the file or configuration and sent to the client
             url - a location for redirect

         Along with a list of rules, you can specify <defaults/>, which means: enable all the default handlers.

    <http_handlers>
        <rule>
            <url>/</url>
            <methods>POST,GET</methods>
            <headers><pragma>no-cache</pragma></headers>
            <handler>
                <type>dynamic_query_handler</type>
                <query_param_name>query</query_param_name>
            </handler>
        </rule>

        <rule>
            <url>/predefined_query</url>
            <methods>POST,GET</methods>
            <handler>
                <type>predefined_query_handler</type>
                <query>SELECT * FROM system.settings</query>
            </handler>
        </rule>

        <rule>
            <handler>
                <type>static</type>
                <status>200</status>
                <content_type>text/plain; charset=UTF-8</content_type>
                <response_content>config://http_server_default_response</response_content>
            </handler>
        </rule>
    </http_handlers>
    -->

    <send_crash_reports>
        <!-- Changing <enabled> to true allows sending crash reports to -->
        <!-- the ClickHouse core developers team via Sentry https://sentry.io -->
        <!-- Doing so at least in pre-production environments is highly appreciated -->
        <enabled>false</enabled>
        <!-- Change <anonymize> to true if you don't feel comfortable attaching the server hostname to the crash report -->
        <anonymize>false</anonymize>
        <!-- Default endpoint should be changed to different Sentry DSN only if you have -->
        <!-- some in-house engineers or hired consultants who're going to debug ClickHouse issues for you -->
        <endpoint>https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277</endpoint>
    </send_crash_reports>

    <!-- Uncomment to disable ClickHouse internal DNS caching. -->
    <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->

    <!-- You can also configure rocksdb like this: -->
    <!-- Full list of options:
         - options:
           - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/options.h#L1452
         - column_family_options:
           - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/options.h#L66
         - block_based_table_options:
           - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/table/block_based/block_based_table_factory.cc#L228
           - https://github.com/facebook/rocksdb/blob/4b013dcbed2df84fde3901d7655b9b91c557454d/include/rocksdb/table.h#L129
    -->
    <!--
    <rocksdb>
        <options>
            <max_background_jobs>8</max_background_jobs>
        </options>
        <column_family_options>
            <num_levels>2</num_levels>
        </column_family_options>
        <block_based_table_options>
            <block_size>1024</block_size>
        </block_based_table_options>
        <tables>
            <table>
                <name>TABLE</name>
                <options>
                    <max_background_jobs>8</max_background_jobs>
                </options>
                <column_family_options>
                    <num_levels>2</num_levels>
                </column_family_options>
                <block_based_table_options>
                    <block_size>1024</block_size>
                </block_based_table_options>
            </table>
        </tables>
    </rocksdb>
    -->

    <!-- Configuration for the query cache -->
    <query_cache>
        <max_size_in_bytes>1073741824</max_size_in_bytes>
        <max_entries>1024</max_entries>
        <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes>
        <max_entry_size_in_rows>30000000</max_entry_size_in_rows>
    </query_cache>

    <backups>
        <allowed_path>backups</allowed_path>

        <!-- If the BACKUP command fails and this setting is true, then the files
             copied before the failure will be removed automatically.
        -->
        <remove_backup_files_after_failure>true</remove_backup_files_after_failure>
    </backups>

    <!-- This allows disabling the exposure of addresses in stack traces for security reasons.
         Please be aware that it does not improve security much, but makes debugging much harder.
         Addresses that are small offsets from zero will be displayed nevertheless, to show nullptr dereferences.
         Regardless of this configuration, the addresses are visible in the system.stack_trace and system.trace_log tables
         if the user has access to these tables.
         I don't recommend changing this setting.
    <show_addresses_in_stack_traces>false</show_addresses_in_stack_traces>
    -->

    <!-- On Linux systems this can control the behavior of the OOM killer.
         <oom_score>-1000</oom_score>
    -->

    <!-- Delay (in seconds) to wait for unfinished queries before forced exit -->
    <!-- <shutdown_wait_unfinished>5</shutdown_wait_unfinished> -->

    <!-- If set to true, ClickHouse will wait for running queries to finish before shutdown. -->
    <!-- <shutdown_wait_unfinished_queries>false</shutdown_wait_unfinished_queries> -->
</clickhouse>