Restore
Creates a new ClickHouse cluster using the specified backup.
TypeScript

```typescript
import {
cloudApi,
decodeMessage,
serviceClients,
Session,
waitForOperation,
} from "@yandex-cloud/nodejs-sdk";
const ClickhouseConfig_LogLevel =
cloudApi.mdb.clickhouse_config_clickhouse.ClickhouseConfig_LogLevel;
const Cluster = cloudApi.mdb.clickhouse_cluster.Cluster;
const Cluster_Environment = cloudApi.mdb.clickhouse_cluster.Cluster_Environment;
const Compression_Method =
cloudApi.mdb.clickhouse_config_clickhouse.Compression_Method;
const Host_Type = cloudApi.mdb.clickhouse_cluster.Host_Type;
const Kafka_AutoOffsetReset =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_AutoOffsetReset;
const Kafka_Debug = cloudApi.mdb.clickhouse_config_clickhouse.Kafka_Debug;
const Kafka_SaslMechanism =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_SaslMechanism;
const Kafka_SecurityProtocol =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_SecurityProtocol;
const Layout_Type = cloudApi.mdb.clickhouse_config_clickhouse.Layout_Type;
const PostgresqlSource_SslMode =
cloudApi.mdb.clickhouse_config_clickhouse.PostgresqlSource_SslMode;
const RestoreClusterRequest =
cloudApi.mdb.clickhouse_cluster_service.RestoreClusterRequest;
(async () => {
const authToken = process.env["YC_OAUTH_TOKEN"];
const session = new Session({ oauthToken: authToken });
const client = session.client(serviceClients.ClusterServiceClient);
const operation = await client.restore(
RestoreClusterRequest.fromPartial({
backupId: "backupId",
// additionalBackupIds: ["additionalBackupIds"],
name: "name",
// description: "description",
// labels: {"key": "labels"},
environment: Cluster_Environment.PRODUCTION,
configSpec: {
// version: "version",
// clickhouse: {
// config: {
// logLevel: ClickhouseConfig_LogLevel.TRACE,
// mergeTree: {
// replicatedDeduplicationWindow: {
// value: 0
// },
// replicatedDeduplicationWindowSeconds: {
// value: 0
// },
// partsToDelayInsert: {
// value: 0
// },
// partsToThrowInsert: {
// value: 0
// },
// inactivePartsToDelayInsert: {
// value: 0
// },
// inactivePartsToThrowInsert: {
// value: 0
// },
// maxReplicatedMergesInQueue: {
// value: 0
// },
// numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: {
// value: 0
// },
// maxBytesToMergeAtMinSpaceInPool: {
// value: 0
// },
// maxBytesToMergeAtMaxSpaceInPool: {
// value: 0
// },
// minBytesForWidePart: {
// value: 0
// },
// minRowsForWidePart: {
// value: 0
// },
// ttlOnlyDropParts: {
// value: true
// },
// allowRemoteFsZeroCopyReplication: {
// value: true
// },
// mergeWithTtlTimeout: {
// value: 0
// },
// mergeWithRecompressionTtlTimeout: {
// value: 0
// },
// maxPartsInTotal: {
// value: 0
// },
// maxNumberOfMergesWithTtlInPool: {
// value: 0
// },
// cleanupDelayPeriod: {
// value: 0
// },
// numberOfFreeEntriesInPoolToExecuteMutation: {
// value: 0
// },
// maxAvgPartSizeForTooManyParts: {
// value: 0
// },
// minAgeToForceMergeSeconds: {
// value: 0
// },
// minAgeToForceMergeOnPartitionOnly: {
// value: true
// },
// mergeSelectingSleepMs: {
// value: 0
// },
// mergeMaxBlockSize: {
// value: 0
// },
// checkSampleColumnIsCorrect: {
// value: true
// },
// maxMergeSelectingSleepMs: {
// value: 0
// },
// maxCleanupDelayPeriod: {
// value: 0
// }
// },
// compression: [{
// method: Compression_Method.LZ4,
// minPartSize: 0,
// minPartSizeRatio: 0,
// level: {
// value: 0
// }
// }],
// dictionaries: [{
// name: "name",
// structure: {
// id: {
// name: "name"
// },
// key: {
// attributes: [{
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// }]
// },
// rangeMin: {
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// },
// rangeMax: {
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// },
// attributes: [{
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// }]
// },
// layout: {
// type: Layout_Type.FLAT,
// sizeInCells: 0,
// maxArraySize: 0
// },
// fixedLifetime: 0,
// lifetimeRange: {
// min: 0,
// max: 0
// },
// httpSource: {
// url: "url",
// format: "format",
// headers: [{
// name: "name",
// value: "value"
// }]
// },
// mysqlSource: {
// db: "db",
// table: "table",
// port: 0,
// user: "user",
// password: "password",
// replicas: [{
// host: "host",
// priority: 0,
// port: 0,
// user: "user",
// password: "password"
// }],
// where: "where",
// invalidateQuery: "invalidateQuery",
// closeConnection: {
// value: true
// },
// shareConnection: {
// value: true
// }
// },
// clickhouseSource: {
// db: "db",
// table: "table",
// host: "host",
// port: 0,
// user: "user",
// password: "password",
// where: "where",
// secure: {
// value: true
// }
// },
// mongodbSource: {
// db: "db",
// collection: "collection",
// host: "host",
// port: 0,
// user: "user",
// password: "password",
// options: "options"
// },
// postgresqlSource: {
// db: "db",
// table: "table",
// hosts: ["hosts"],
// port: 0,
// user: "user",
// password: "password",
// invalidateQuery: "invalidateQuery",
// sslMode: PostgresqlSource_SslMode.DISABLE
// }
// }],
// graphiteRollup: [{
// name: "name",
// patterns: [{
// regexp: "regexp",
// function: "function",
// retention: [{
// age: 0,
// precision: 0
// }]
// }],
// pathColumnName: "pathColumnName",
// timeColumnName: "timeColumnName",
// valueColumnName: "valueColumnName",
// versionColumnName: "versionColumnName"
// }],
// kafka: {
// securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
// saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_GSSAPI,
// saslUsername: "saslUsername",
// saslPassword: "saslPassword",
// enableSslCertificateVerification: {
// value: true
// },
// maxPollIntervalMs: {
// value: 0
// },
// sessionTimeoutMs: {
// value: 0
// },
// debug: Kafka_Debug.DEBUG_GENERIC,
// autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
// },
// kafkaTopics: [{
// name: "name",
// settings: {
// securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
// saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_GSSAPI,
// saslUsername: "saslUsername",
// saslPassword: "saslPassword",
// enableSslCertificateVerification: {
// value: true
// },
// maxPollIntervalMs: {
// value: 0
// },
// sessionTimeoutMs: {
// value: 0
// },
// debug: Kafka_Debug.DEBUG_GENERIC,
// autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
// }
// }],
// rabbitmq: {
// username: "username",
// password: "password",
// vhost: "vhost"
// },
// maxConnections: {
// value: 0
// },
// maxConcurrentQueries: {
// value: 0
// },
// keepAliveTimeout: {
// value: 0
// },
// uncompressedCacheSize: {
// value: 0
// },
// markCacheSize: {
// value: 0
// },
// maxTableSizeToDrop: {
// value: 0
// },
// maxPartitionSizeToDrop: {
// value: 0
// },
// builtinDictionariesReloadInterval: {
// value: 0
// },
// timezone: "timezone",
// geobaseEnabled: {
// value: true
// },
// geobaseUri: "geobaseUri",
// queryLogRetentionSize: {
// value: 0
// },
// queryLogRetentionTime: {
// value: 0
// },
// queryThreadLogEnabled: {
// value: true
// },
// queryThreadLogRetentionSize: {
// value: 0
// },
// queryThreadLogRetentionTime: {
// value: 0
// },
// partLogRetentionSize: {
// value: 0
// },
// partLogRetentionTime: {
// value: 0
// },
// metricLogEnabled: {
// value: true
// },
// metricLogRetentionSize: {
// value: 0
// },
// metricLogRetentionTime: {
// value: 0
// },
// traceLogEnabled: {
// value: true
// },
// traceLogRetentionSize: {
// value: 0
// },
// traceLogRetentionTime: {
// value: 0
// },
// textLogEnabled: {
// value: true
// },
// textLogRetentionSize: {
// value: 0
// },
// textLogRetentionTime: {
// value: 0
// },
// textLogLevel: ClickhouseConfig_LogLevel.TRACE,
// opentelemetrySpanLogEnabled: {
// value: true
// },
// opentelemetrySpanLogRetentionSize: {
// value: 0
// },
// opentelemetrySpanLogRetentionTime: {
// value: 0
// },
// queryViewsLogEnabled: {
// value: true
// },
// queryViewsLogRetentionSize: {
// value: 0
// },
// queryViewsLogRetentionTime: {
// value: 0
// },
// asynchronousMetricLogEnabled: {
// value: true
// },
// asynchronousMetricLogRetentionSize: {
// value: 0
// },
// asynchronousMetricLogRetentionTime: {
// value: 0
// },
// sessionLogEnabled: {
// value: true
// },
// sessionLogRetentionSize: {
// value: 0
// },
// sessionLogRetentionTime: {
// value: 0
// },
// zookeeperLogEnabled: {
// value: true
// },
// zookeeperLogRetentionSize: {
// value: 0
// },
// zookeeperLogRetentionTime: {
// value: 0
// },
// asynchronousInsertLogEnabled: {
// value: true
// },
// asynchronousInsertLogRetentionSize: {
// value: 0
// },
// asynchronousInsertLogRetentionTime: {
// value: 0
// },
// backgroundPoolSize: {
// value: 0
// },
// backgroundMergesMutationsConcurrencyRatio: {
// value: 0
// },
// backgroundSchedulePoolSize: {
// value: 0
// },
// backgroundFetchesPoolSize: {
// value: 0
// },
// backgroundMovePoolSize: {
// value: 0
// },
// backgroundDistributedSchedulePoolSize: {
// value: 0
// },
// backgroundBufferFlushSchedulePoolSize: {
// value: 0
// },
// backgroundMessageBrokerSchedulePoolSize: {
// value: 0
// },
// backgroundCommonPoolSize: {
// value: 0
// },
// defaultDatabase: {
// value: "value"
// },
// totalMemoryProfilerStep: {
// value: 0
// },
// totalMemoryTrackerSampleProbability: {
// value: 0
// },
// queryMaskingRules: [{
// name: "name",
// regexp: "regexp",
// replace: "replace"
// }],
// dictionariesLazyLoad: {
// value: true
// },
// queryCache: {
// maxSizeInBytes: {
// value: 0
// },
// maxEntries: {
// value: 0
// },
// maxEntrySizeInBytes: {
// value: 0
// },
// maxEntrySizeInRows: {
// value: 0
// }
// }
// },
// resources: {
// resourcePresetId: "resourcePresetId",
// diskSize: 0,
// diskTypeId: "diskTypeId"
// }
// },
// zookeeper: {
// resources: {
// resourcePresetId: "resourcePresetId",
// diskSize: 0,
// diskTypeId: "diskTypeId"
// }
// },
// backupWindowStart: {
// hours: 0,
// minutes: 0,
// seconds: 0,
// nanos: 0
// },
// access: {
// dataLens: true,
// webSql: true,
// metrika: true,
// serverless: true,
// dataTransfer: true,
// yandexQuery: true
// },
// cloudStorage: {
// enabled: true,
// moveFactor: {
// value: 0
// },
// dataCacheEnabled: {
// value: true
// },
// dataCacheMaxSize: {
// value: 0
// },
// preferNotToMerge: {
// value: true
// }
// },
// sqlDatabaseManagement: {
// value: true
// },
// sqlUserManagement: {
// value: true
// },
// adminPassword: "adminPassword",
// embeddedKeeper: {
// value: true
// },
// backupRetainPeriodDays: {
// value: 0
// }
},
// hostSpecs: [{
// zoneId: "zoneId",
// type: Host_Type.CLICKHOUSE,
// subnetId: "subnetId",
// assignPublicIp: true,
// shardName: "shardName"
// }],
networkId: "networkId",
// folderId: "folderId",
// serviceAccountId: "serviceAccountId",
// securityGroupIds: ["securityGroupIds"],
// deletionProtection: true
})
);
const finishedOp = await waitForOperation(operation, session);
if (finishedOp.response) {
const result = decodeMessage<typeof Cluster>(finishedOp.response);
console.log(result);
}
})();
```

Python

```python
import os
import grpc
import yandexcloud
from google.protobuf.wrappers_pb2 import BoolValue, DoubleValue, Int64Value, StringValue
from google.type.timeofday_pb2 import TimeOfDay
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Access
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import CloudStorage
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Cluster
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2_grpc import ClusterServiceStub
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import ConfigSpec
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import HostSpec
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Resources
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import RestoreClusterMetadata
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import RestoreClusterRequest
token = os.getenv('YC_OAUTH_TOKEN')
sdk = yandexcloud.SDK(token=token)
service = sdk.client(ClusterServiceStub)
operation = service.Restore(
RestoreClusterRequest(
backup_id = "backupId",
# additional_backup_ids = ["additionalBackupIds"],
name = "name",
# description = "description",
# labels = {"key": "labels"},
environment = Cluster.Environment.PRODUCTION,
config_spec = ConfigSpec(
# version = "version",
# clickhouse = ConfigSpec.Clickhouse(
# config = ClickhouseConfig(
# log_level = ClickhouseConfig.LogLevel.TRACE,
# merge_tree = ClickhouseConfig.MergeTree(
# replicated_deduplication_window = Int64Value(
# value = 0
# ),
# replicated_deduplication_window_seconds = Int64Value(
# value = 0
# ),
# parts_to_delay_insert = Int64Value(
# value = 0
# ),
# parts_to_throw_insert = Int64Value(
# value = 0
# ),
# inactive_parts_to_delay_insert = Int64Value(
# value = 0
# ),
# inactive_parts_to_throw_insert = Int64Value(
# value = 0
# ),
# max_replicated_merges_in_queue = Int64Value(
# value = 0
# ),
# number_of_free_entries_in_pool_to_lower_max_size_of_merge = Int64Value(
# value = 0
# ),
# max_bytes_to_merge_at_min_space_in_pool = Int64Value(
# value = 0
# ),
# max_bytes_to_merge_at_max_space_in_pool = Int64Value(
# value = 0
# ),
# min_bytes_for_wide_part = Int64Value(
# value = 0
# ),
# min_rows_for_wide_part = Int64Value(
# value = 0
# ),
# ttl_only_drop_parts = BoolValue(
# value = True
# ),
# allow_remote_fs_zero_copy_replication = BoolValue(
# value = True
# ),
# merge_with_ttl_timeout = Int64Value(
# value = 0
# ),
# merge_with_recompression_ttl_timeout = Int64Value(
# value = 0
# ),
# max_parts_in_total = Int64Value(
# value = 0
# ),
# max_number_of_merges_with_ttl_in_pool = Int64Value(
# value = 0
# ),
# cleanup_delay_period = Int64Value(
# value = 0
# ),
# number_of_free_entries_in_pool_to_execute_mutation = Int64Value(
# value = 0
# ),
# max_avg_part_size_for_too_many_parts = Int64Value(
# value = 0
# ),
# min_age_to_force_merge_seconds = Int64Value(
# value = 0
# ),
# min_age_to_force_merge_on_partition_only = BoolValue(
# value = True
# ),
# merge_selecting_sleep_ms = Int64Value(
# value = 0
# ),
# merge_max_block_size = Int64Value(
# value = 0
# ),
# check_sample_column_is_correct = BoolValue(
# value = True
# ),
# max_merge_selecting_sleep_ms = Int64Value(
# value = 0
# ),
# max_cleanup_delay_period = Int64Value(
# value = 0
# )
# ),
# compression = [ClickhouseConfig.Compression(
# method = Compression.Method.LZ4,
# min_part_size = 0,
# min_part_size_ratio = 0,
# level = Int64Value(
# value = 0
# )
# )],
# dictionaries = [ClickhouseConfig.ExternalDictionary(
# name = "name",
# structure = ExternalDictionary.Structure(
# id = Structure.Id(
# name = "name"
# ),
# key = Structure.Key(
# attributes = [Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# )]
# ),
# range_min = Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# ),
# range_max = Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# ),
# attributes = [Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# )]
# ),
# layout = ExternalDictionary.Layout(
# type = Layout.Type.FLAT,
# size_in_cells = 0,
# max_array_size = 0
# ),
# fixed_lifetime = 0,
# lifetime_range = ExternalDictionary.Range(
# min = 0,
# max = 0
# ),
# http_source = ExternalDictionary.HttpSource(
# url = "url",
# format = "format",
# headers = [HttpSource.Header(
# name = "name",
# value = "value"
# )]
# ),
# mysql_source = ExternalDictionary.MysqlSource(
# db = "db",
# table = "table",
# port = 0,
# user = "user",
# password = "password",
# replicas = [MysqlSource.Replica(
# host = "host",
# priority = 0,
# port = 0,
# user = "user",
# password = "password"
# )],
# where = "where",
# invalidate_query = "invalidateQuery",
# close_connection = BoolValue(
# value = True
# ),
# share_connection = BoolValue(
# value = True
# )
# ),
# clickhouse_source = ExternalDictionary.ClickhouseSource(
# db = "db",
# table = "table",
# host = "host",
# port = 0,
# user = "user",
# password = "password",
# where = "where",
# secure = BoolValue(
# value = True
# )
# ),
# mongodb_source = ExternalDictionary.MongodbSource(
# db = "db",
# collection = "collection",
# host = "host",
# port = 0,
# user = "user",
# password = "password",
# options = "options"
# ),
# postgresql_source = ExternalDictionary.PostgresqlSource(
# db = "db",
# table = "table",
# hosts = ["hosts"],
# port = 0,
# user = "user",
# password = "password",
# invalidate_query = "invalidateQuery",
# ssl_mode = PostgresqlSource.SslMode.DISABLE
# )
# )],
# graphite_rollup = [ClickhouseConfig.GraphiteRollup(
# name = "name",
# patterns = [GraphiteRollup.Pattern(
# regexp = "regexp",
# function = "function",
# retention = [Pattern.Retention(
# age = 0,
# precision = 0
# )]
# )],
# path_column_name = "pathColumnName",
# time_column_name = "timeColumnName",
# value_column_name = "valueColumnName",
# version_column_name = "versionColumnName"
# )],
# kafka = ClickhouseConfig.Kafka(
# security_protocol = Kafka.SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
# sasl_mechanism = Kafka.SaslMechanism.SASL_MECHANISM_GSSAPI,
# sasl_username = "saslUsername",
# sasl_password = "saslPassword",
# enable_ssl_certificate_verification = BoolValue(
# value = True
# ),
# max_poll_interval_ms = Int64Value(
# value = 0
# ),
# session_timeout_ms = Int64Value(
# value = 0
# ),
# debug = Kafka.Debug.DEBUG_GENERIC,
# auto_offset_reset = Kafka.AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
# ),
# kafka_topics = [ClickhouseConfig.KafkaTopic(
# name = "name",
# settings = ClickhouseConfig.Kafka(
# security_protocol = Kafka.SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
# sasl_mechanism = Kafka.SaslMechanism.SASL_MECHANISM_GSSAPI,
# sasl_username = "saslUsername",
# sasl_password = "saslPassword",
# enable_ssl_certificate_verification = BoolValue(
# value = True
# ),
# max_poll_interval_ms = Int64Value(
# value = 0
# ),
# session_timeout_ms = Int64Value(
# value = 0
# ),
# debug = Kafka.Debug.DEBUG_GENERIC,
# auto_offset_reset = Kafka.AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
# )
# )],
# rabbitmq = ClickhouseConfig.Rabbitmq(
# username = "username",
# password = "password",
# vhost = "vhost"
# ),
# max_connections = Int64Value(
# value = 0
# ),
# max_concurrent_queries = Int64Value(
# value = 0
# ),
# keep_alive_timeout = Int64Value(
# value = 0
# ),
# uncompressed_cache_size = Int64Value(
# value = 0
# ),
# mark_cache_size = Int64Value(
# value = 0
# ),
# max_table_size_to_drop = Int64Value(
# value = 0
# ),
# max_partition_size_to_drop = Int64Value(
# value = 0
# ),
# builtin_dictionaries_reload_interval = Int64Value(
# value = 0
# ),
# timezone = "timezone",
# geobase_enabled = BoolValue(
# value = True
# ),
# geobase_uri = "geobaseUri",
# query_log_retention_size = Int64Value(
# value = 0
# ),
# query_log_retention_time = Int64Value(
# value = 0
# ),
# query_thread_log_enabled = BoolValue(
# value = True
# ),
# query_thread_log_retention_size = Int64Value(
# value = 0
# ),
# query_thread_log_retention_time = Int64Value(
# value = 0
# ),
# part_log_retention_size = Int64Value(
# value = 0
# ),
# part_log_retention_time = Int64Value(
# value = 0
# ),
# metric_log_enabled = BoolValue(
# value = True
# ),
# metric_log_retention_size = Int64Value(
# value = 0
# ),
# metric_log_retention_time = Int64Value(
# value = 0
# ),
# trace_log_enabled = BoolValue(
# value = True
# ),
# trace_log_retention_size = Int64Value(
# value = 0
# ),
# trace_log_retention_time = Int64Value(
# value = 0
# ),
# text_log_enabled = BoolValue(
# value = True
# ),
# text_log_retention_size = Int64Value(
# value = 0
# ),
# text_log_retention_time = Int64Value(
# value = 0
# ),
# text_log_level = ClickhouseConfig.LogLevel.TRACE,
# opentelemetry_span_log_enabled = BoolValue(
# value = True
# ),
# opentelemetry_span_log_retention_size = Int64Value(
# value = 0
# ),
# opentelemetry_span_log_retention_time = Int64Value(
# value = 0
# ),
# query_views_log_enabled = BoolValue(
# value = True
# ),
# query_views_log_retention_size = Int64Value(
# value = 0
# ),
# query_views_log_retention_time = Int64Value(
# value = 0
# ),
# asynchronous_metric_log_enabled = BoolValue(
# value = True
# ),
# asynchronous_metric_log_retention_size = Int64Value(
# value = 0
# ),
# asynchronous_metric_log_retention_time = Int64Value(
# value = 0
# ),
# session_log_enabled = BoolValue(
# value = True
# ),
# session_log_retention_size = Int64Value(
# value = 0
# ),
# session_log_retention_time = Int64Value(
# value = 0
# ),
# zookeeper_log_enabled = BoolValue(
# value = True
# ),
# zookeeper_log_retention_size = Int64Value(
# value = 0
# ),
# zookeeper_log_retention_time = Int64Value(
# value = 0
# ),
# asynchronous_insert_log_enabled = BoolValue(
# value = True
# ),
# asynchronous_insert_log_retention_size = Int64Value(
# value = 0
# ),
# asynchronous_insert_log_retention_time = Int64Value(
# value = 0
# ),
# background_pool_size = Int64Value(
# value = 0
# ),
# background_merges_mutations_concurrency_ratio = Int64Value(
# value = 0
# ),
# background_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_fetches_pool_size = Int64Value(
# value = 0
# ),
# background_move_pool_size = Int64Value(
# value = 0
# ),
# background_distributed_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_buffer_flush_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_message_broker_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_common_pool_size = Int64Value(
# value = 0
# ),
# default_database = StringValue(
# value = "value"
# ),
# total_memory_profiler_step = Int64Value(
# value = 0
# ),
# total_memory_tracker_sample_probability = DoubleValue(
# value = 0
# ),
# query_masking_rules = [ClickhouseConfig.QueryMaskingRule(
# name = "name",
# regexp = "regexp",
# replace = "replace"
# )],
# dictionaries_lazy_load = BoolValue(
# value = True
# ),
# query_cache = ClickhouseConfig.QueryCache(
# max_size_in_bytes = Int64Value(
# value = 0
# ),
# max_entries = Int64Value(
# value = 0
# ),
# max_entry_size_in_bytes = Int64Value(
# value = 0
# ),
# max_entry_size_in_rows = Int64Value(
# value = 0
# )
# )
# ),
# resources = Resources(
# resource_preset_id = "resourcePresetId",
# disk_size = 0,
# disk_type_id = "diskTypeId"
# )
# ),
# zookeeper = ConfigSpec.Zookeeper(
# resources = Resources(
# resource_preset_id = "resourcePresetId",
# disk_size = 0,
# disk_type_id = "diskTypeId"
# )
# ),
# backup_window_start = TimeOfDay(
# hours = 0,
# minutes = 0,
# seconds = 0,
# nanos = 0
# ),
# access = Access(
# data_lens = True,
# web_sql = True,
# metrika = True,
# serverless = True,
# data_transfer = True,
# yandex_query = True
# ),
# cloud_storage = CloudStorage(
# enabled = True,
# move_factor = DoubleValue(
# value = 0
# ),
# data_cache_enabled = BoolValue(
# value = True
# ),
# data_cache_max_size = Int64Value(
# value = 0
# ),
# prefer_not_to_merge = BoolValue(
# value = True
# )
# ),
# sql_database_management = BoolValue(
# value = True
# ),
# sql_user_management = BoolValue(
# value = True
# ),
# admin_password = "adminPassword",
# embedded_keeper = BoolValue(
# value = True
# ),
# backup_retain_period_days = Int64Value(
# value = 0
# )
),
# host_specs = [HostSpec(
# zone_id = "zoneId",
# type = Host.Type.CLICKHOUSE,
# subnet_id = "subnetId",
# assign_public_ip = True,
# shard_name = "shardName"
# )],
network_id = "networkId",
# folder_id = "folderId",
# service_account_id = "serviceAccountId",
# security_group_ids = ["securityGroupIds"],
# deletion_protection = True
)
)
operation_result = sdk.wait_operation_and_get_result(
operation,
response_type=Cluster,
meta_type=RestoreClusterMetadata,
)
print(operation_result)
```
RestoreClusterRequest
backupId
: string
ID of the backup to restore from. This backup will be used to create one cluster shard. To get the backup ID, use a ClusterService.ListBackups request.
additionalBackupIds
: string
Additional IDs of the backups to restore from. Each additional backup restores a separate shard, so the restored cluster will have len(additional_backup_ids)+1 shards in total. To get the backup ID, use a ClusterService.ListBackups request.
name
: string
Name of the new ClickHouse cluster. The name must be unique within the folder.
description
: string
Description of the new ClickHouse cluster.
labels
: string
Custom labels for the ClickHouse cluster as key:value
pairs. Maximum 64 per resource.
For example, "project": "mvp" or "source": "dictionary".
environment
: Cluster.Environment
Deployment environment of the new ClickHouse cluster.
configSpec
: ConfigSpec
Configuration for the ClickHouse cluster to be created.
hostSpecs
: HostSpec
Configurations for the ClickHouse hosts to be created for the cluster restored from the backup.
networkId
: string
ID of the network to create the ClickHouse cluster in.
folderId
: string
ID of the folder to create the ClickHouse cluster in.
serviceAccountId
: string
ID of the service account used for access to Object Storage.
securityGroupIds
: string
User security groups.
deletionProtection
: bool
Deletion Protection inhibits deletion of the cluster.
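For orientation, a restore request can be far shorter than the full templates above. The sketch below builds a RestoreClusterRequest with only the required fields, assuming the service fills in defaults for an empty config spec; every ID and the zone are placeholders, not real values.

```python
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Cluster, Host
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import (
    ConfigSpec,
    HostSpec,
    RestoreClusterRequest,
)

# Minimal restore request: one backup, one ClickHouse host, default config.
request = RestoreClusterRequest(
    backup_id="<backup-id>",                  # from ClusterService.ListBackups
    name="restored-cluster",
    environment=Cluster.Environment.PRODUCTION,
    config_spec=ConfigSpec(),                 # empty spec: service defaults apply
    host_specs=[
        HostSpec(
            zone_id="ru-central1-a",          # placeholder zone
            type=Host.Type.CLICKHOUSE,
            subnet_id="<subnet-id>",
        )
    ],
    network_id="<network-id>",
)
```

Passing this request to the service client from the Python example above (service.Restore(request)) starts the restore operation.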
ConfigSpec
Clickhouse
config
: config.ClickhouseConfig
Configuration for a ClickHouse server.
resources
: Resources
Resources allocated to ClickHouse hosts.
Zookeeper
resources
: Resources
Resources allocated to ZooKeeper hosts. If not set, minimal available resources will be used. All available resource presets can be retrieved with a ResourcePresetService.List request.
version
: string
Version of the ClickHouse server software.
clickhouse
: Clickhouse
Configuration and resources for a ClickHouse server.
zookeeper
: Zookeeper
Configuration and resources for a ZooKeeper server.
backupWindowStart
: google.type.TimeOfDay
Time to start the daily backup, in the UTC timezone.
access
: Access
Access policy for external services.
If you want a specific service to access the ClickHouse cluster, then set the necessary values in this policy.
cloudStorage
: CloudStorage
sqlDatabaseManagement
: google.protobuf.BoolValue
Whether database management through SQL commands is enabled.
sqlUserManagement
: google.protobuf.BoolValue
Whether user management through SQL commands is enabled.
adminPassword
: string
Password for user 'admin' that has SQL user management access.
embeddedKeeper
: google.protobuf.BoolValue
Whether the cluster should use embedded Keeper instead of ZooKeeper.
backupRetainPeriodDays
: google.protobuf.Int64Value
Retention period of automatically created backups, in days.
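To illustrate how the wrapper-typed ConfigSpec fields combine, here is a hedged fragment that enables SQL database and user management and tunes the backup policy; all values are illustrative.

```python
from google.protobuf.wrappers_pb2 import BoolValue, Int64Value
from google.type.timeofday_pb2 import TimeOfDay
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import ConfigSpec

config_spec = ConfigSpec(
    sql_database_management=BoolValue(value=True),
    sql_user_management=BoolValue(value=True),
    admin_password="<admin-password>",             # admin user for SQL user management
    backup_window_start=TimeOfDay(hours=3),        # daily backup at 03:00 UTC
    backup_retain_period_days=Int64Value(value=7), # keep automatic backups for a week
)
```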
HostSpec
zoneId
: string
ID of the availability zone where the host resides. To get a list of available zones, use the yandex.cloud.compute.v1.ZoneService.List request.
type
: Host.Type
Type of the host to be deployed.
subnetId
: string
ID of the subnet that the host should belong to. This subnet should be a part of the network that the cluster belongs to. The ID of the network is set in the Cluster.network_id field.
assignPublicIp
: bool
Whether the host should get a public IP address on creation.
After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign a public IP to a host without one, recreate the host with assign_public_ip set as needed.
Possible values:
- false - don't assign a public IP to the host.
- true - the host should have a public IP address.
shardName
: string
Name of the shard that the host is assigned to.
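As a sketch, the host specifications for one shard replicated across two availability zones might look as follows; the zone and subnet IDs are placeholders.

```python
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Host
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import HostSpec

# Two replicas of the same shard in different availability zones.
host_specs = [
    HostSpec(
        zone_id="ru-central1-a",
        type=Host.Type.CLICKHOUSE,
        subnet_id="<subnet-in-zone-a>",
        assign_public_ip=False,
        shard_name="shard1",
    ),
    HostSpec(
        zone_id="ru-central1-b",
        type=Host.Type.CLICKHOUSE,
        subnet_id="<subnet-in-zone-b>",
        assign_public_ip=False,
        shard_name="shard1",
    ),
]
```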
ClickhouseConfig
ClickHouse configuration options. A detailed description of each set of options is available in the ClickHouse documentation.
Any options not listed here are not supported.
LogLevel
LOG_LEVEL_UNSPECIFIED
TRACE
DEBUG
INFORMATION
WARNING
ERROR
MergeTree
Options specific to the MergeTree table engine.
replicatedDeduplicationWindow
: google.protobuf.Int64Value
Number of blocks of hashes to keep in ZooKeeper.
replicatedDeduplicationWindowSeconds
: google.protobuf.Int64Value
Period of time to keep blocks of hashes for.
partsToDelayInsert
: google.protobuf.Int64Value
If a table contains at least this many active parts in a single partition, artificially slow down inserts into the table.
partsToThrowInsert
: google.protobuf.Int64Value
If there are more than this many active parts in a single partition, throw a 'Too many parts ...' exception.
inactivePartsToDelayInsert
: google.protobuf.Int64Value
inactivePartsToThrowInsert
: google.protobuf.Int64Value
maxReplicatedMergesInQueue
: google.protobuf.Int64Value
How many tasks of merging and mutating parts are allowed simultaneously in ReplicatedMergeTree queue.
numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge
: google.protobuf.Int64Value
If there are fewer than the specified number of free entries in the background pool (or replicated queue), start to lower the maximum size of merges to process.
maxBytesToMergeAtMinSpaceInPool
: google.protobuf.Int64Value
Maximum total size of parts to merge when there is a minimum of free threads in the background pool (or entries in the replication queue).
maxBytesToMergeAtMaxSpaceInPool
: google.protobuf.Int64Value
minBytesForWidePart
: google.protobuf.Int64Value
Minimum number of bytes in a data part that can be stored in Wide format.
For more information, see the ClickHouse documentation.
minRowsForWidePart
: google.protobuf.Int64Value
Minimum number of rows in a data part that can be stored in Wide format.
For more information, see the ClickHouse documentation.
ttlOnlyDropParts
: google.protobuf.BoolValue
Enables or disables complete dropping of data parts where all rows are expired in MergeTree tables.
For more information, see the ClickHouse documentation.
allowRemoteFsZeroCopyReplication
: google.protobuf.BoolValue
mergeWithTtlTimeout
: google.protobuf.Int64Value
mergeWithRecompressionTtlTimeout
: google.protobuf.Int64Value
maxPartsInTotal
: google.protobuf.Int64Value
maxNumberOfMergesWithTtlInPool
: google.protobuf.Int64Value
cleanupDelayPeriod
: google.protobuf.Int64Value
numberOfFreeEntriesInPoolToExecuteMutation
: google.protobuf.Int64Value
maxAvgPartSizeForTooManyParts
: google.protobuf.Int64Value
The 'too many parts' check according to parts_to_delay_insert and parts_to_throw_insert is active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger, INSERTs are neither delayed nor rejected. This makes it possible to have hundreds of terabytes in a single table on a single server if the parts are successfully merged into larger parts. It does not affect the thresholds on inactive parts or total parts. Default: 1 GiB. Min version: 22.10. See the in-depth description on ClickHouse GitHub.
minAgeToForceMergeSeconds
: google.protobuf.Int64Value
Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds. Default: 0 (disabled). Min version: 22.10. See the in-depth description in the ClickHouse documentation.
minAgeToForceMergeOnPartitionOnly
: google.protobuf.BoolValue
Whether min_age_to_force_merge_seconds should be applied only to the entire partition and not to a subset. Default: false. Min version: 22.11. See the in-depth description in the ClickHouse documentation.
mergeSelectingSleepMs
: google.protobuf.Int64Value
Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters. Default: 5000. Min version: 21.10. See the in-depth description in the ClickHouse documentation.
mergeMaxBlockSize
: google.protobuf.Int64Value
The number of rows that are read from the merged parts into memory. Default: 8192. See the in-depth description in the ClickHouse documentation.
checkSampleColumnIsCorrect
: google.protobuf.BoolValue
Enables a check at table creation that the data type of a column used for sampling (or of the sampling expression) is correct. The data type must be one of the unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default: true. See the in-depth description in the ClickHouse documentation.
maxMergeSelectingSleepMs
: google.protobuf.Int64Value
Maximum sleep time for merge selecting; a lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ZooKeeper in large-scale clusters. Default: 60000. Min version: 23.6. See the in-depth description on ClickHouse GitHub.
maxCleanupDelayPeriod
: google.protobuf.Int64Value
Maximum period to clean old queue logs, blocks hashes, and parts. Default: 300. Min version: 23.6. See the in-depth description on ClickHouse GitHub.
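Since each of these options is a protobuf wrapper type rather than a plain scalar, an unset field means "use the service default", while an explicit wrapper carries the chosen value. A minimal sketch with illustrative values:

```python
from google.protobuf.wrappers_pb2 import BoolValue, Int64Value
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

# Only the wrapped fields are sent; everything else keeps its default.
config = ClickhouseConfig(
    merge_tree=ClickhouseConfig.MergeTree(
        parts_to_delay_insert=Int64Value(value=150),
        parts_to_throw_insert=Int64Value(value=300),
        ttl_only_drop_parts=BoolValue(value=True),
    )
)
```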
Kafka
SecurityProtocol
SECURITY_PROTOCOL_UNSPECIFIED
SECURITY_PROTOCOL_PLAINTEXT
SECURITY_PROTOCOL_SSL
SECURITY_PROTOCOL_SASL_PLAINTEXT
SECURITY_PROTOCOL_SASL_SSL
SaslMechanism
SASL_MECHANISM_UNSPECIFIED
SASL_MECHANISM_GSSAPI
SASL_MECHANISM_PLAIN
SASL_MECHANISM_SCRAM_SHA_256
SASL_MECHANISM_SCRAM_SHA_512
Debug
DEBUG_UNSPECIFIED
DEBUG_GENERIC
DEBUG_BROKER
DEBUG_TOPIC
DEBUG_METADATA
DEBUG_FEATURE
DEBUG_QUEUE
DEBUG_MSG
DEBUG_PROTOCOL
DEBUG_CGRP
DEBUG_SECURITY
DEBUG_FETCH
DEBUG_INTERCEPTOR
DEBUG_PLUGIN
DEBUG_CONSUMER
DEBUG_ADMIN
DEBUG_EOS
DEBUG_MOCK
DEBUG_ASSIGNOR
DEBUG_CONF
DEBUG_TELEMETRY
DEBUG_ALL
AutoOffsetReset
AUTO_OFFSET_RESET_UNSPECIFIED
AUTO_OFFSET_RESET_SMALLEST
AUTO_OFFSET_RESET_EARLIEST
AUTO_OFFSET_RESET_BEGINNING
AUTO_OFFSET_RESET_LARGEST
AUTO_OFFSET_RESET_LATEST
AUTO_OFFSET_RESET_END
AUTO_OFFSET_RESET_ERROR
securityProtocol
: SecurityProtocol
saslMechanism
: SaslMechanism
saslUsername
: string
saslPassword
: string
enableSslCertificateVerification
: google.protobuf.BoolValue
maxPollIntervalMs
: google.protobuf.Int64Value
sessionTimeoutMs
: google.protobuf.Int64Value
debug
: Debug
autoOffsetReset
: AutoOffsetReset
KafkaTopic
name
: string
settings
: Kafka
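Each kafkaTopics entry pairs a topic name with its own Kafka settings block, alongside the cluster-wide kafka block. A sketch with placeholder topic name and credentials:

```python
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

Kafka = ClickhouseConfig.Kafka

config = ClickhouseConfig(
    # Cluster-wide Kafka settings.
    kafka=Kafka(
        security_protocol=Kafka.SecurityProtocol.SECURITY_PROTOCOL_SASL_SSL,
        sasl_mechanism=Kafka.SaslMechanism.SASL_MECHANISM_SCRAM_SHA_512,
        sasl_username="<default-user>",
        sasl_password="<default-password>",
    ),
    # Settings for one specific topic.
    kafka_topics=[
        ClickhouseConfig.KafkaTopic(
            name="events",
            settings=Kafka(
                security_protocol=Kafka.SecurityProtocol.SECURITY_PROTOCOL_SASL_SSL,
                sasl_mechanism=Kafka.SaslMechanism.SASL_MECHANISM_SCRAM_SHA_512,
                sasl_username="<events-user>",
                sasl_password="<events-password>",
            ),
        )
    ],
)
```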
Rabbitmq
username
: string
RabbitMQ username
password
: string
RabbitMQ password
vhost
: string
RabbitMQ virtual host
Compression
Method
METHOD_UNSPECIFIED
LZ4
ZSTD
method
: Method
Compression method to use for the specified combination of min_part_size and min_part_size_ratio.
minPartSize
: int64
Minimum size of a part of a table.
minPartSizeRatio
: double
Minimum ratio of a part relative to the size of all the data in the table.
level
: google.protobuf.Int64Value
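Compression rules form a list; per the ClickHouse compression settings, the first rule whose min_part_size and min_part_size_ratio conditions match a data part is applied. A sketch that compresses large parts with ZSTD and the rest with LZ4:

```python
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

Compression = ClickhouseConfig.Compression

config = ClickhouseConfig(
    compression=[
        # Parts of 1 GiB and larger: ZSTD.
        Compression(
            method=Compression.Method.ZSTD,
            min_part_size=1024 * 1024 * 1024,
            min_part_size_ratio=0.0,
        ),
        # Everything else: LZ4.
        Compression(
            method=Compression.Method.LZ4,
            min_part_size=0,
            min_part_size_ratio=0.0,
        ),
    ]
)
```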
ExternalDictionary
HttpSource
Header
name
: string
value
: string
url
: string
URL of the source dictionary available over HTTP.
format
: string
The data format. Valid values are all formats supported by ClickHouse SQL dialect.
headers
: Header
HTTP headers.
MysqlSource
Replica
host
: string
MySQL host of the replica.
priority
: int64
The priority of the replica that ClickHouse takes into account when connecting. Replica with the highest priority should have this field set to the lowest number.
port
: int64
Port to use when connecting to the replica. If a port is not specified for a replica, ClickHouse uses the port specified for the source.
user
: string
Name of the MySQL database user.
password
: string
Password of the MySQL database user.
db
: string
Name of the MySQL database to connect to.
table
: string
Name of the database table to use as a ClickHouse dictionary.
port
: int64
Default port to use when connecting to a replica of the dictionary source.
user
: string
Name of the default user for replicas of the dictionary source.
password
: string
Password of the default user for replicas of the dictionary source.
replicas
: Replica
List of MySQL replicas of the database used as dictionary source.
where
: string
Selection criteria for the data in the specified MySQL table.
invalidateQuery
: string
Query for checking the dictionary status, to pull only updated data. For more details, see ClickHouse documentation on dictionaries.
closeConnection
: google.protobuf.BoolValue
Whether the connection should be closed after each request.
shareConnection
: google.protobuf.BoolValue
Whether a connection can be shared for several requests.
ClickhouseSource
db
: string
Name of the ClickHouse database.
table
: string
Name of the table in the specified database to be used as the dictionary source.
host
: string
ClickHouse host of the specified database.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the ClickHouse database user.
password
: string
Password of the ClickHouse database user.
where
: string
Selection criteria for the data in the specified ClickHouse table.
secure
: google.protobuf.BoolValue
Use SSL for the connection.
MongodbSource
db
: string
Name of the MongoDB database.
collection
: string
Name of the collection in the specified database to be used as the dictionary source.
host
: string
MongoDB host of the specified database.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the MongoDB database user.
password
: string
Password of the MongoDB database user.
options
: string
PostgresqlSource
SslMode
SSL_MODE_UNSPECIFIED
DISABLE
Only try a non-SSL connection.
ALLOW
First try a non-SSL connection; if that fails, try an SSL connection.
PREFER
First try an SSL connection; if that fails, try a non-SSL connection.
VERIFY_CA
Only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA).
VERIFY_FULL
Only try an SSL connection, verify that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate.
db
: string
Name of the PostgreSQL database.
table
: string
Name of the table in the specified database to be used as the dictionary source.
hosts
: string
Names of the PostgreSQL hosts.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the PostgreSQL database user.
password
: string
Password of the PostgreSQL database user.
invalidateQuery
: string
Query for checking the dictionary status, to pull only updated data. For more details, see ClickHouse documentation on dictionaries.
sslMode
: SslMode
Mode of SSL TCP/IP connection to the PostgreSQL host. For more details, see PostgreSQL documentation.
Structure
Attribute
name
: string
Name of the column.
type
: string
Type of the column.
nullValue
: string
Default value for an element without data (for example, an empty string).
expression
: string
Expression describing the attribute, if applicable.
hierarchical
: bool
Indication of hierarchy support.
Default value: false.
injective
: bool
Indication of injective mapping "id -> attribute".
Default value: false.
Id
Numeric key.
name
: string
Name of the numeric key.
Key
Complex key.
attributes
: Attribute
Attributes of a complex key.
id
: Id
Single numeric key column for the dictionary.
key
: Key
Composite key for the dictionary, consisting of one or more key columns. For details, see ClickHouse documentation.
rangeMin
: Attribute
Field holding the beginning of the range for dictionaries with RANGE_HASHED layout.
For details, see ClickHouse documentation.
rangeMax
: Attribute
Field holding the end of the range for dictionaries with RANGE_HASHED layout.
For details, see ClickHouse documentation.
attributes
: Attribute
Description of the fields available for database queries. For details, see ClickHouse documentation.
Layout
Layout determining how to store the dictionary in memory.
Type
TYPE_UNSPECIFIED
FLAT
The entire dictionary is stored in memory in the form of flat arrays. Available for all dictionary sources.
HASHED
The entire dictionary is stored in memory in the form of a hash table. Available for all dictionary sources.
COMPLEX_KEY_HASHED
Similar to HASHED, to be used with composite keys. Available for all dictionary sources.
RANGE_HASHED
The entire dictionary is stored in memory in the form of a hash table, with an ordered array of ranges and their corresponding values. Available for all dictionary sources.
CACHE
The dictionary is stored in a cache with a set number of cells. Available for MySQL, ClickHouse and HTTP dictionary sources.
COMPLEX_KEY_CACHE
Similar to CACHE, to be used with composite keys. Available for MySQL, ClickHouse and HTTP dictionary sources.
type
: Type
Layout type for an external dictionary.
sizeInCells
: int64
Number of cells in the cache. Rounded up to a power of two. Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
maxArraySize
: int64
Maximum dictionary key size. Applicable only for FLAT layout type.
Range
min
: int64
Minimum dictionary lifetime.
max
: int64
Maximum dictionary lifetime.
name
: string
Name of the external dictionary.
structure
: Structure
Set of attributes for the external dictionary. For in-depth description, see ClickHouse documentation.
layout
: Layout
Layout for storing the dictionary in memory. For in-depth description, see ClickHouse documentation.
One of lifetime
Setting for the period of time between dictionary updates. For details, see ClickHouse documentation.
fixedLifetime
: int64
Fixed interval between dictionary updates.
lifetimeRange
: Range
Range of intervals between dictionary updates for ClickHouse to choose from.
One of source
Description of the source for the external dictionary.
httpSource
: HttpSource
HTTP source for the dictionary.
mysqlSource
: MysqlSource
MySQL source for the dictionary.
clickhouseSource
: ClickhouseSource
ClickHouse source for the dictionary.
mongodbSource
: MongodbSource
MongoDB source for the dictionary.
postgresqlSource
: PostgresqlSource
PostgreSQL source for the dictionary.
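Putting these pieces together: a dictionary needs a name, a structure, a layout, one lifetime variant, and one source variant. Below is a sketch with a flat layout and an HTTP source; the URL, column names, and lifetime are illustrative.

```python
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

ExternalDictionary = ClickhouseConfig.ExternalDictionary

dictionary = ExternalDictionary(
    name="regions",
    structure=ExternalDictionary.Structure(
        id=ExternalDictionary.Structure.Id(name="region_id"),
        attributes=[
            ExternalDictionary.Structure.Attribute(
                name="region_name",
                type="String",
                null_value="",
            )
        ],
    ),
    layout=ExternalDictionary.Layout(type=ExternalDictionary.Layout.Type.FLAT),
    fixed_lifetime=300,  # refresh every 300 seconds (oneof "lifetime")
    http_source=ExternalDictionary.HttpSource(  # oneof "source"
        url="https://example.com/regions.tsv",
        format="TabSeparated",
    ),
)
config = ClickhouseConfig(dictionaries=[dictionary])
```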
GraphiteRollup
Rollup settings for the GraphiteMergeTree table engine.
Pattern
Retention
age
: int64
Minimum age of the data in seconds.
precision
: int64
Precision of determining the age of the data, in seconds.
regexp
: string
Pattern for metric names.
function
: string
Name of the aggregating function to apply to data of the age specified in retention.
retention
: Retention
Age of data to use for thinning.
name
: string
Name for the specified combination of settings for Graphite rollup.
patterns
: Pattern
Pattern to use for the rollup.
pathColumnName
: string
The name of the column storing the metric name (Graphite sensor). Default: Path. See the in-depth description in the ClickHouse documentation.
timeColumnName
: string
The name of the column storing the time of measuring the metric. Default: Time. See the in-depth description in the ClickHouse documentation.
valueColumnName
: string
The name of the column storing the value of the metric at the time set in time_column_name. Default: Value. See the in-depth description in the ClickHouse documentation.
versionColumnName
: string
The name of the column storing the version of the metric. Default: Timestamp. See the in-depth description in the ClickHouse documentation.
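A rollup entry pairs metric-name patterns with retention rules. The sketch below averages all metrics, keeping one-minute precision for the first day and hourly precision afterwards; all values are illustrative.

```python
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

GraphiteRollup = ClickhouseConfig.GraphiteRollup

rollup = GraphiteRollup(
    name="default_rollup",
    patterns=[
        GraphiteRollup.Pattern(
            regexp=".*",       # match every metric name
            function="avg",    # aggregate with the average
            retention=[
                GraphiteRollup.Pattern.Retention(age=0, precision=60),
                GraphiteRollup.Pattern.Retention(age=86400, precision=3600),
            ],
        )
    ],
)
config = ClickhouseConfig(graphite_rollup=[rollup])
```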
QueryMaskingRule
name
: string
Name for the rule.
regexp
: string
RE2 compatible regular expression. Required.
replace
: string
Substitution string for sensitive data. Default: six asterisks.
QueryCache
maxSizeInBytes
: google.protobuf.Int64Value
The maximum cache size in bytes. Default: 1073741824 (1 GiB)
maxEntries
: google.protobuf.Int64Value
The maximum number of SELECT query results stored in the cache. Default: 1024
maxEntrySizeInBytes
: google.protobuf.Int64Value
The maximum size in bytes a SELECT query result may have to be saved in the cache. Default: 1048576 (1 MiB).
maxEntrySizeInRows
: google.protobuf.Int64Value
The maximum number of rows a SELECT query result may have to be saved in the cache. Default: 30000000 (30 million).
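As a sketch, lowering two of the cache limits looks like this; the values are illustrative, and the query cache requires ClickHouse 23.5 or later (see the queryCache field below).

```python
from google.protobuf.wrappers_pb2 import Int64Value
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

config = ClickhouseConfig(
    query_cache=ClickhouseConfig.QueryCache(
        max_size_in_bytes=Int64Value(value=512 * 1024 * 1024),  # 512 MiB
        max_entries=Int64Value(value=512),
    )
)
```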
logLevel
: LogLevel
Logging level for the ClickHouse cluster. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR.
mergeTree
: MergeTree
Settings for the MergeTree engine. See description in ClickHouse documentation.
compression
: Compression
Compression settings for the ClickHouse cluster. See in-depth description in ClickHouse documentation.
dictionaries
: ExternalDictionary
Configuration of external dictionaries to be used by the ClickHouse cluster. See in-depth description in ClickHouse documentation.
graphiteRollup
: GraphiteRollup
Settings for thinning Graphite data. See in-depth description in ClickHouse documentation.
kafka
: Kafka
kafkaTopics
: KafkaTopic
rabbitmq
: Rabbitmq
maxConnections
: google.protobuf.Int64Value
Maximum number of inbound connections.
maxConcurrentQueries
: google.protobuf.Int64Value
Maximum number of simultaneously processed requests.
keepAliveTimeout
: google.protobuf.Int64Value
Number of milliseconds that ClickHouse waits for incoming requests before closing the connection.
uncompressedCacheSize
: google.protobuf.Int64Value
Cache size (in bytes) for uncompressed data used by MergeTree tables.
markCacheSize
: google.protobuf.Int64Value
Approximate size (in bytes) of the cache of "marks" used by MergeTree tables.
maxTableSizeToDrop
: google.protobuf.Int64Value
Maximum size of the table that can be deleted using a DROP query.
maxPartitionSizeToDrop
: google.protobuf.Int64Value
Maximum size of the partition that can be deleted using a DROP query.
builtinDictionariesReloadInterval
: google.protobuf.Int64Value
The setting is deprecated and has no effect.
timezone
: string
The server's time zone to be used in DateTime fields conversions. Specified as an IANA identifier.
geobaseEnabled
: google.protobuf.BoolValue
Enable or disable geobase.
geobaseUri
: string
Address of the archive with the user geobase in Object Storage.
queryLogRetentionSize
: google.protobuf.Int64Value
The maximum size that query_log can grow to before old data will be removed. If set to 0, automatic removal of query_log data based on size is disabled.
queryLogRetentionTime
: google.protobuf.Int64Value
The maximum time that query_log records will be retained before removal. If set to 0, automatic removal of query_log data based on time is disabled.
queryThreadLogEnabled
: google.protobuf.BoolValue
Whether query_thread_log system table is enabled.
queryThreadLogRetentionSize
: google.protobuf.Int64Value
The maximum size that query_thread_log can grow to before old data will be removed. If set to 0, automatic removal of query_thread_log data based on size is disabled.
queryThreadLogRetentionTime
: google.protobuf.Int64Value
The maximum time that query_thread_log records will be retained before removal. If set to 0, automatic removal of query_thread_log data based on time is disabled.
partLogRetentionSize
: google.protobuf.Int64Value
The maximum size that part_log can grow to before old data will be removed. If set to 0, automatic removal of part_log data based on size is disabled.
partLogRetentionTime
: google.protobuf.Int64Value
The maximum time that part_log records will be retained before removal. If set to 0, automatic removal of part_log data based on time is disabled.
metricLogEnabled
: google.protobuf.BoolValue
Whether metric_log system table is enabled.
metricLogRetentionSize
: google.protobuf.Int64Value
The maximum size that metric_log can grow to before old data will be removed. If set to 0, automatic removal of metric_log data based on size is disabled.
metricLogRetentionTime
: google.protobuf.Int64Value
The maximum time that metric_log records will be retained before removal. If set to 0, automatic removal of metric_log data based on time is disabled.
traceLogEnabled
: google.protobuf.BoolValue
Whether trace_log system table is enabled.
traceLogRetentionSize
: google.protobuf.Int64Value
The maximum size that trace_log can grow to before old data will be removed. If set to 0, automatic removal of trace_log data based on size is disabled.
traceLogRetentionTime
: google.protobuf.Int64Value
The maximum time that trace_log records will be retained before removal. If set to 0, automatic removal of trace_log data based on time is disabled.
textLogEnabled
: google.protobuf.BoolValue
Whether text_log system table is enabled.
textLogRetentionSize
: google.protobuf.Int64Value
The maximum size that text_log can grow to before old data will be removed. If set to 0, automatic removal of text_log data based on size is disabled.
textLogRetentionTime
: google.protobuf.Int64Value
The maximum time that text_log records will be retained before removal. If set to 0, automatic removal of text_log data based on time is disabled.
textLogLevel
: LogLevel
Logging level for text_log system table. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR.
opentelemetrySpanLogEnabled
: google.protobuf.BoolValue
Enable or disable opentelemetry_span_log system table. Default value: false.
opentelemetrySpanLogRetentionSize
: google.protobuf.Int64Value
The maximum size that opentelemetry_span_log can grow to before old data will be removed. If set to 0 (default), automatic removal of opentelemetry_span_log data based on size is disabled.
opentelemetrySpanLogRetentionTime
: google.protobuf.Int64Value
The maximum time that opentelemetry_span_log records will be retained before removal. If set to 0, automatic removal of opentelemetry_span_log data based on time is disabled.
queryViewsLogEnabled
: google.protobuf.BoolValue
Enable or disable query_views_log system table. Default value: false.
queryViewsLogRetentionSize
: google.protobuf.Int64Value
The maximum size that query_views_log can grow to before old data will be removed. If set to 0 (default), automatic removal of query_views_log data based on size is disabled.
queryViewsLogRetentionTime
: google.protobuf.Int64Value
The maximum time that query_views_log records will be retained before removal. If set to 0, automatic removal of query_views_log data based on time is disabled.
asynchronousMetricLogEnabled
: google.protobuf.BoolValue
Enable or disable asynchronous_metric_log system table. Default value: false.
asynchronousMetricLogRetentionSize
: google.protobuf.Int64Value
The maximum size that asynchronous_metric_log can grow to before old data will be removed. If set to 0 (default), automatic removal of asynchronous_metric_log data based on size is disabled.
asynchronousMetricLogRetentionTime
: google.protobuf.Int64Value
The maximum time that asynchronous_metric_log records will be retained before removal. If set to 0, automatic removal of asynchronous_metric_log data based on time is disabled.
sessionLogEnabled
: google.protobuf.BoolValue
Enable or disable session_log system table. Default value: false.
sessionLogRetentionSize
: google.protobuf.Int64Value
The maximum size that session_log can grow to before old data will be removed. If set to 0 (default), automatic removal of session_log data based on size is disabled.
sessionLogRetentionTime
: google.protobuf.Int64Value
The maximum time that session_log records will be retained before removal. If set to 0, automatic removal of session_log data based on time is disabled.
zookeeperLogEnabled
: google.protobuf.BoolValue
Enable or disable zookeeper_log system table. Default value: false.
zookeeperLogRetentionSize
: google.protobuf.Int64Value
The maximum size that zookeeper_log can grow to before old data will be removed. If set to 0 (default), automatic removal of zookeeper_log data based on size is disabled.
zookeeperLogRetentionTime
: google.protobuf.Int64Value
The maximum time that zookeeper_log records will be retained before removal. If set to 0, automatic removal of zookeeper_log data based on time is disabled.
asynchronousInsertLogEnabled
: google.protobuf.BoolValue
Enable or disable asynchronous_insert_log system table. Default value: false. Minimal required ClickHouse version: 22.10.
asynchronousInsertLogRetentionSize
: google.protobuf.Int64Value
The maximum size that asynchronous_insert_log can grow to before old data will be removed. If set to 0 (default), automatic removal of asynchronous_insert_log data based on size is disabled.
asynchronousInsertLogRetentionTime
: google.protobuf.Int64Value
The maximum time that asynchronous_insert_log records will be retained before removal. If set to 0, automatic removal of asynchronous_insert_log data based on time is disabled.
backgroundPoolSize
: google.protobuf.Int64Value
backgroundMergesMutationsConcurrencyRatio
: google.protobuf.Int64Value
Sets a ratio between the number of threads and the number of background merges and mutations that can be executed concurrently. For example, if the ratio equals 2 and background_pool_size is set to 16, ClickHouse can execute 32 background merges concurrently. This is possible because background operations can be suspended and postponed, which is needed to give small merges more execution priority. You can only increase this ratio at runtime; to lower it you have to restart the server. As with the background_pool_size setting, background_merges_mutations_concurrency_ratio can be applied from the default profile for backward compatibility. Default: 2. See the in-depth description in the ClickHouse documentation.
backgroundSchedulePoolSize
: google.protobuf.Int64Value
backgroundFetchesPoolSize
: google.protobuf.Int64Value
Sets the number of threads performing background fetches for tables with ReplicatedMergeTree engines. Default value: 8.
For more information, see the ClickHouse documentation.
backgroundMovePoolSize
: google.protobuf.Int64Value
backgroundDistributedSchedulePoolSize
: google.protobuf.Int64Value
backgroundBufferFlushSchedulePoolSize
: google.protobuf.Int64Value
backgroundMessageBrokerSchedulePoolSize
: google.protobuf.Int64Value
backgroundCommonPoolSize
: google.protobuf.Int64Value
The maximum number of threads that will be used for performing a variety of operations (mostly garbage collection) for *MergeTree-engine tables in the background. Default: 8. See the in-depth description in the ClickHouse documentation.
defaultDatabase
: google.protobuf.StringValue
The default database.
To get a list of cluster databases, see Yandex Managed ClickHouse documentation.
totalMemoryProfilerStep
: google.protobuf.Int64Value
Sets the memory size (in bytes) for a stack trace at every peak allocation step. Default value: 4194304.
For more information, see the ClickHouse documentation.
totalMemoryTrackerSampleProbability
: google.protobuf.DoubleValue
queryMaskingRules
: QueryMaskingRule
Regexp-based rules that are applied to queries, as well as to all log messages, before they are stored in server logs and the system.query_log, system.text_log, and system.processes tables, and in logs sent to the client. This helps prevent sensitive data (such as names, emails, personal identifiers, or credit card numbers) from leaking from SQL queries into logs. Changes to these settings are applied on ClickHouse restart. See the in-depth description in the ClickHouse documentation.
dictionariesLazyLoad
: google.protobuf.BoolValue
Lazy loading of dictionaries. Default: true. See the in-depth description in the ClickHouse documentation.
queryCache
: QueryCache
Query cache configuration. Min version: 23.5. See the in-depth description in the ClickHouse documentation.
Resources
resourcePresetId
: string
ID of the preset for computational resources available to a host (CPU, memory, etc.). All available presets are listed in the documentation.
diskSize
: int64
Volume of the storage available to a host, in bytes.
diskTypeId
: string
Type of the storage environment for the host. Possible values:
- network-hdd - network HDD drive,
- network-ssd - network SSD drive,
- local-ssd - local SSD storage.
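A sketch of a Resources message; the preset ID is a placeholder (real IDs come from a ResourcePresetService.List request).

```python
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Resources

resources = Resources(
    resource_preset_id="<resource-preset-id>",
    disk_size=100 * 2**30,       # 100 GiB, specified in bytes
    disk_type_id="network-ssd",
)
```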
Access
dataLens
: bool
Allow exporting data from the cluster to DataLens.
webSql
: bool
Allow SQL queries to the cluster databases from the management console.
See SQL queries in the management console for more details.
metrika
: bool
Allow importing data from Yandex Metrica and AppMetrica to the cluster.
See AppMetrica documentation for more details.
serverless
: bool
Allow access to the cluster for Serverless.
dataTransfer
: bool
Allow access for DataTransfer.
yandexQuery
: bool
Allow access for Yandex Query.
CloudStorage
enabled
: bool
Whether to use Object Storage for storing ClickHouse data.
moveFactor
: google.protobuf.DoubleValue
dataCacheEnabled
: google.protobuf.BoolValue
dataCacheMaxSize
: google.protobuf.Int64Value
preferNotToMerge
: google.protobuf.BoolValue
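To close with a hedged sketch tying these last two messages together: the fragment below opens the cluster to DataLens and the management console and enables hybrid storage, moving parts to Object Storage when free local disk space drops below the move factor; all values are illustrative.

```python
from google.protobuf.wrappers_pb2 import BoolValue, DoubleValue
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Access, CloudStorage
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import ConfigSpec

config_spec = ConfigSpec(
    access=Access(data_lens=True, web_sql=True),
    cloud_storage=CloudStorage(
        enabled=True,
        move_factor=DoubleValue(value=0.1),        # move parts when free space < 10%
        data_cache_enabled=BoolValue(value=True),  # cache Object Storage data locally
    ),
)
```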
Kafka
SecurityProtocol
SECURITY_PROTOCOL_UNSPECIFIED
SECURITY_PROTOCOL_PLAINTEXT
SECURITY_PROTOCOL_SSL
SECURITY_PROTOCOL_SASL_PLAINTEXT
SECURITY_PROTOCOL_SASL_SSL
SaslMechanism
SASL_MECHANISM_UNSPECIFIED
SASL_MECHANISM_GSSAPI
SASL_MECHANISM_PLAIN
SASL_MECHANISM_SCRAM_SHA_256
SASL_MECHANISM_SCRAM_SHA_512
Debug
DEBUG_UNSPECIFIED
DEBUG_GENERIC
DEBUG_BROKER
DEBUG_TOPIC
DEBUG_METADATA
DEBUG_FEATURE
DEBUG_QUEUE
DEBUG_MSG
DEBUG_PROTOCOL
DEBUG_CGRP
DEBUG_SECURITY
DEBUG_FETCH
DEBUG_INTERCEPTOR
DEBUG_PLUGIN
DEBUG_CONSUMER
DEBUG_ADMIN
DEBUG_EOS
DEBUG_MOCK
DEBUG_ASSIGNOR
DEBUG_CONF
DEBUG_TELEMETRY
DEBUG_ALL
AutoOffsetReset
AUTO_OFFSET_RESET_UNSPECIFIED
AUTO_OFFSET_RESET_SMALLEST
AUTO_OFFSET_RESET_EARLIEST
AUTO_OFFSET_RESET_BEGINNING
AUTO_OFFSET_RESET_LARGEST
AUTO_OFFSET_RESET_LATEST
AUTO_OFFSET_RESET_END
AUTO_OFFSET_RESET_ERROR
securityProtocol
: SecurityProtocol
saslMechanism
: SaslMechanism
saslUsername
: string
saslPassword
: string
enableSslCertificateVerification
: google.protobuf.BoolValue
maxPollIntervalMs
: google.protobuf.Int64Value
sessionTimeoutMs
: google.protobuf.Int64Value
debug
: Debug
autoOffsetReset
: AutoOffsetReset
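Using the enum constants imported in the example at the top of this page, a Kafka settings block for SASL-over-SSL authentication might look like the following sketch (credentials and timeouts are placeholders):
const kafkaSettings = {
  securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_SASL_SSL,
  saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_SCRAM_SHA_512,
  saslUsername: "kafka-user",                        // placeholder
  saslPassword: "kafka-password",                    // placeholder
  enableSslCertificateVerification: { value: true }, // BoolValue wrapper
  maxPollIntervalMs: { value: 300000 },              // Int64Value wrapper; illustrative
  sessionTimeoutMs: { value: 45000 },                // illustrative
  autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_EARLIEST,
};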
Header
name
: string
value
: string
Replica
host
: string
MySQL host of the replica.
priority
: int64
The priority of the replica that ClickHouse takes into account when connecting. The replica with the highest priority should have this field set to the lowest number.
port
: int64
Port to use when connecting to the replica. If a port is not specified for a replica, ClickHouse uses the port specified for the source.
user
: string
Name of the MySQL database user.
password
: string
Password of the MySQL database user.
Attribute
name
: string
Name of the column.
type
: string
Type of the column.
nullValue
: string
Default value for an element without data (for example, an empty string).
expression
: string
Expression, describing the attribute, if applicable.
hierarchical
: bool
Indication of hierarchy support.
Default value: false.
injective
: bool
Indication of injective mapping "id -> attribute".
Default value: false.
Id
Numeric key.
name
: string
Name of the numeric key.
Key
Complex key.
attributes
: Attribute
Attributes of a complex key.
Structure
Attribute
name
: string
Name of the column.
type
: string
Type of the column.
nullValue
: string
Default value for an element without data (for example, an empty string).
expression
: string
Expression, describing the attribute, if applicable.
hierarchical
: bool
Indication of hierarchy support.
Default value: false.
injective
: bool
Indication of injective mapping "id -> attribute".
Default value: false.
Id
Numeric key.
name
: string
Name of the numeric key.
Key
Complex key.
attributes
: Attribute
Attributes of a complex key.
id
: Id
Single numeric key column for the dictionary.
key
: Key
Composite key for the dictionary, consisting of one or more key columns. For details, see the ClickHouse documentation.
rangeMin
: Attribute
Field holding the beginning of the range for dictionaries with RANGE_HASHED layout. For details, see the ClickHouse documentation.
rangeMax
: Attribute
Field holding the end of the range for dictionaries with RANGE_HASHED layout. For details, see the ClickHouse documentation.
attributes
: Attribute
Description of the fields available for database queries. For details, see ClickHouse documentation.
Layout
Layout determining how to store the dictionary in memory.
Type
TYPE_UNSPECIFIED
FLAT
The entire dictionary is stored in memory in the form of flat arrays. Available for all dictionary sources.
HASHED
The entire dictionary is stored in memory in the form of a hash table. Available for all dictionary sources.
COMPLEX_KEY_HASHED
Similar to HASHED, to be used with composite keys. Available for all dictionary sources.
RANGE_HASHED
The entire dictionary is stored in memory in the form of a hash table, with an ordered array of ranges and their corresponding values. Available for all dictionary sources.
CACHE
The dictionary is stored in a cache with a set number of cells. Available for MySQL, ClickHouse and HTTP dictionary sources.
COMPLEX_KEY_CACHE
Similar to CACHE, to be used with composite keys. Available for MySQL, ClickHouse and HTTP dictionary sources.
type
: Type
Layout type for an external dictionary.
sizeInCells
: int64
Number of cells in the cache. Rounded up to a power of two. Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
maxArraySize
: int64
Maximum dictionary key size. Applicable only for FLAT layout type.
Range
min
: int64
Minimum dictionary lifetime.
max
: int64
Maximum dictionary lifetime.
HttpSource
Header
name
: string
value
: string
url
: string
URL of the source dictionary available over HTTP.
format
: string
The data format. Valid values are all formats supported by the ClickHouse SQL dialect.
headers
: Header
HTTP headers.
MysqlSource
Replica
host
: string
MySQL host of the replica.
priority
: int64
The priority of the replica that ClickHouse takes into account when connecting. The replica with the highest priority should have this field set to the lowest number.
port
: int64
Port to use when connecting to the replica. If a port is not specified for a replica, ClickHouse uses the port specified for the source.
user
: string
Name of the MySQL database user.
password
: string
Password of the MySQL database user.
db
: string
Name of the MySQL database to connect to.
table
: string
Name of the database table to use as a ClickHouse dictionary.
port
: int64
Default port to use when connecting to a replica of the dictionary source.
user
: string
Name of the default user for replicas of the dictionary source.
password
: string
Password of the default user for replicas of the dictionary source.
replicas
: Replica
List of MySQL replicas of the database used as dictionary source.
where
: string
Selection criteria for the data in the specified MySQL table.
invalidateQuery
: string
Query for checking the dictionary status, to pull only updated data. For more details, see ClickHouse documentation on dictionaries.
closeConnection
: google.protobuf.BoolValue
Whether the connection should be closed after each request.
shareConnection
: google.protobuf.BoolValue
Whether a connection should be shared for some requests.
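Priorities and per-replica ports come together in the replicas list. A sketch of a MySQL dictionary source with two replicas, where the preferred replica gets the lowest priority number (hosts, names, and credentials are hypothetical):
const mysqlSource = {
  db: "shop",                  // hypothetical database
  table: "customers",          // hypothetical table
  port: 3306,                  // default port for all replicas
  user: "dict_reader",         // default user for replicas
  password: "dict_password",   // placeholder
  replicas: [
    { host: "mysql-a.example.net", priority: 1 }, // preferred replica
    { host: "mysql-b.example.net", priority: 2 }, // fallback
  ],
  where: "active = 1",         // optional selection criteria
};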
ClickhouseSource
db
: string
Name of the ClickHouse database.
table
: string
Name of the table in the specified database to be used as the dictionary source.
host
: string
ClickHouse host of the specified database.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the ClickHouse database user.
password
: string
Password of the ClickHouse database user.
where
: string
Selection criteria for the data in the specified ClickHouse table.
secure
: google.protobuf.BoolValue
Use SSL for the connection.
MongodbSource
db
: string
Name of the MongoDB database.
collection
: string
Name of the collection in the specified database to be used as the dictionary source.
host
: string
MongoDB host of the specified database.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the MongoDB database user.
password
: string
Password of the MongoDB database user.
options
: string
PostgresqlSource
SslMode
SSL_MODE_UNSPECIFIED
DISABLE
Only try a non-SSL connection.
ALLOW
First try a non-SSL connection; if that fails, try an SSL connection.
PREFER
First try an SSL connection; if that fails, try a non-SSL connection.
VERIFY_CA
Only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA).
VERIFY_FULL
Only try an SSL connection, verify that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate.
db
: string
Name of the PostgreSQL database.
table
: string
Name of the table in the specified database to be used as the dictionary source.
hosts
: string
Name of the PostgreSQL host.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the PostgreSQL database user.
password
: string
Password of the PostgreSQL database user.
invalidateQuery
: string
Query for checking the dictionary status, to pull only updated data. For more details, see ClickHouse documentation on dictionaries.
sslMode
: SslMode
Mode of SSL TCP/IP connection to the PostgreSQL host. For more details, see PostgreSQL documentation.
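A comparable sketch for a PostgreSQL source, using the PostgresqlSource_SslMode enum imported at the top of the page. Note that the hosts field name is plural, which suggests a repeated field, so it is written as an array here (an assumption); all names and credentials are placeholders.
const postgresqlSource = {
  db: "analytics",               // hypothetical database
  table: "dim_regions",          // hypothetical table
  hosts: ["pg.example.net"],     // assumed repeated field, per the plural name
  port: 5432,
  user: "dict_reader",           // placeholder
  password: "dict_password",     // placeholder
  sslMode: PostgresqlSource_SslMode.VERIFY_FULL, // strictest verification
};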
Retention
age
: int64
Minimum age of the data in seconds.
precision
: int64
Precision of determining the age of the data, in seconds.
Pattern
Retention
age
: int64
Minimum age of the data in seconds.
precision
: int64
Precision of determining the age of the data, in seconds.
regexp
: string
Pattern for metric names.
function
: string
Name of the aggregating function to apply to data of the age specified in retention.
retention
: Retention
Age of data to use for thinning.
MergeTree
Options specific to the MergeTree table engine.
replicatedDeduplicationWindow
: google.protobuf.Int64Value
Number of blocks of hashes to keep in ZooKeeper.
replicatedDeduplicationWindowSeconds
: google.protobuf.Int64Value
Period of time to keep blocks of hashes for.
partsToDelayInsert
: google.protobuf.Int64Value
If the table contains at least this many active parts in a single partition, inserts into the table are artificially slowed down.
partsToThrowInsert
: google.protobuf.Int64Value
If there are more than this number of active parts in a single partition, ClickHouse throws the 'Too many parts ...' exception.
inactivePartsToDelayInsert
: google.protobuf.Int64Value
inactivePartsToThrowInsert
: google.protobuf.Int64Value
maxReplicatedMergesInQueue
: google.protobuf.Int64Value
How many tasks of merging and mutating parts are allowed simultaneously in the ReplicatedMergeTree queue.
numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge
: google.protobuf.Int64Value
If there are fewer than the specified number of free entries in the background pool (or replicated queue), start lowering the maximum size of merges to process.
maxBytesToMergeAtMinSpaceInPool
: google.protobuf.Int64Value
Maximum total size of parts to merge when there is the minimum number of free threads in the background pool (or entries in the replication queue).
maxBytesToMergeAtMaxSpaceInPool
: google.protobuf.Int64Value
minBytesForWidePart
: google.protobuf.Int64Value
Minimum number of bytes in a data part that can be stored in Wide format.
For more information, see the ClickHouse documentation.
minRowsForWidePart
: google.protobuf.Int64Value
Minimum number of rows in a data part that can be stored in Wide format.
For more information, see the ClickHouse documentation.
ttlOnlyDropParts
: google.protobuf.BoolValue
Enables or disables complete dropping of data parts where all rows are expired in MergeTree tables.
For more information, see the ClickHouse documentation.
allowRemoteFsZeroCopyReplication
: google.protobuf.BoolValue
mergeWithTtlTimeout
: google.protobuf.Int64Value
mergeWithRecompressionTtlTimeout
: google.protobuf.Int64Value
maxPartsInTotal
: google.protobuf.Int64Value
maxNumberOfMergesWithTtlInPool
: google.protobuf.Int64Value
cleanupDelayPeriod
: google.protobuf.Int64Value
numberOfFreeEntriesInPoolToExecuteMutation
: google.protobuf.Int64Value
maxAvgPartSizeForTooManyParts
: google.protobuf.Int64Value
The 'too many parts' check according to parts_to_delay_insert and parts_to_throw_insert is active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger, INSERTs are neither delayed nor rejected. This makes it possible to have hundreds of terabytes in a single table on a single server, provided the parts are successfully merged into larger parts. This does not affect the thresholds on inactive parts or total parts. Default: 1 GiB. Min version: 22.10. See the in-depth description on ClickHouse GitHub.
minAgeToForceMergeSeconds
: google.protobuf.Int64Value
Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds. Default: 0 (disabled). Min version: 22.10. See the in-depth description in the ClickHouse documentation.
minAgeToForceMergeOnPartitionOnly
: google.protobuf.BoolValue
Whether min_age_to_force_merge_seconds should be applied only to the entire partition and not to a subset. Default: false. Min version: 22.11. See the in-depth description in the ClickHouse documentation.
mergeSelectingSleepMs
: google.protobuf.Int64Value
Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters. Default: 5000. Min version: 21.10. See the in-depth description in the ClickHouse documentation.
mergeMaxBlockSize
: google.protobuf.Int64Value
The number of rows that are read from the merged parts into memory. Default: 8192. See the in-depth description in the ClickHouse documentation.
checkSampleColumnIsCorrect
: google.protobuf.BoolValue
Enables a check at table creation that the data type of the column used for sampling (or the sampling expression) is correct. The data type must be one of the unsigned integer types: UInt8, UInt16, UInt32, UInt64. Default: true. See the in-depth description in the ClickHouse documentation.
maxMergeSelectingSleepMs
: google.protobuf.Int64Value
Maximum sleep time for merge selecting. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ZooKeeper in large-scale clusters. Default: 60000. Min version: 23.6. See the in-depth description on ClickHouse GitHub.
maxCleanupDelayPeriod
: google.protobuf.Int64Value
Maximum period to clean old queue logs, block hashes, and parts. Default: 300. Min version: 23.6. See the in-depth description on ClickHouse GitHub.
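Since every numeric field above is an Int64Value (and every boolean a BoolValue), each entry in a fromPartial-style request takes the { value: ... } wrapper form, as in the commented-out example at the top of this page. A sketch that tightens the 'too many parts' thresholds (numbers are illustrative, not tuning advice):
const mergeTree = {
  partsToDelayInsert: { value: 150 },            // start slowing inserts at 150 active parts
  partsToThrowInsert: { value: 300 },            // reject inserts at 300 active parts
  replicatedDeduplicationWindow: { value: 100 }, // keep 100 hash blocks in ZooKeeper
  ttlOnlyDropParts: { value: true },             // drop whole parts once all rows expire
};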
Compression
Method
METHOD_UNSPECIFIED
LZ4
ZSTD
method
: Method
Compression method to use for the specified combination of min_part_size and min_part_size_ratio.
minPartSize
: int64
Minimum size of a part of a table.
minPartSizeRatio
: double
Minimum ratio of a part relative to the size of all the data in the table.
level
: google.protobuf.Int64Value
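compression is a repeated message, so several method/threshold combinations can be listed. A sketch with a single ZSTD rule for large parts, using the Compression_Method enum imported at the top of the page (thresholds are illustrative):
const compression = [{
  method: Compression_Method.ZSTD,
  minPartSize: 1073741824,  // apply only to parts of at least 1 GiB
  minPartSizeRatio: 0.01,   // ...that are also at least 1% of the table's data
  level: { value: 3 },      // Int64Value wrapper; illustrative compression level
}];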
ExternalDictionary
HttpSource
Header
name
: string
value
: string
url
: string
URL of the source dictionary available over HTTP.
format
: string
The data format. Valid values are all formats supported by the ClickHouse SQL dialect.
headers
: Header
HTTP headers.
MysqlSource
Replica
host
: string
MySQL host of the replica.
priority
: int64
The priority of the replica that ClickHouse takes into account when connecting. The replica with the highest priority should have this field set to the lowest number.
port
: int64
Port to use when connecting to the replica. If a port is not specified for a replica, ClickHouse uses the port specified for the source.
user
: string
Name of the MySQL database user.
password
: string
Password of the MySQL database user.
db
: string
Name of the MySQL database to connect to.
table
: string
Name of the database table to use as a ClickHouse dictionary.
port
: int64
Default port to use when connecting to a replica of the dictionary source.
user
: string
Name of the default user for replicas of the dictionary source.
password
: string
Password of the default user for replicas of the dictionary source.
replicas
: Replica
List of MySQL replicas of the database used as dictionary source.
where
: string
Selection criteria for the data in the specified MySQL table.
invalidateQuery
: string
Query for checking the dictionary status, to pull only updated data. For more details, see ClickHouse documentation on dictionaries.
closeConnection
: google.protobuf.BoolValue
Whether the connection should be closed after each request.
shareConnection
: google.protobuf.BoolValue
Whether a connection should be shared for some requests.
ClickhouseSource
db
: string
Name of the ClickHouse database.
table
: string
Name of the table in the specified database to be used as the dictionary source.
host
: string
ClickHouse host of the specified database.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the ClickHouse database user.
password
: string
Password of the ClickHouse database user.
where
: string
Selection criteria for the data in the specified ClickHouse table.
secure
: google.protobuf.BoolValue
Use SSL for the connection.
MongodbSource
db
: string
Name of the MongoDB database.
collection
: string
Name of the collection in the specified database to be used as the dictionary source.
host
: string
MongoDB host of the specified database.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the MongoDB database user.
password
: string
Password of the MongoDB database user.
options
: string
PostgresqlSource
SslMode
SSL_MODE_UNSPECIFIED
DISABLE
Only try a non-SSL connection.
ALLOW
First try a non-SSL connection; if that fails, try an SSL connection.
PREFER
First try an SSL connection; if that fails, try a non-SSL connection.
VERIFY_CA
Only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA).
VERIFY_FULL
Only try an SSL connection, verify that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate.
db
: string
Name of the PostgreSQL database.
table
: string
Name of the table in the specified database to be used as the dictionary source.
hosts
: string
Name of the PostgreSQL host.
port
: int64
Port to use when connecting to the host.
user
: string
Name of the PostgreSQL database user.
password
: string
Password of the PostgreSQL database user.
invalidateQuery
: string
Query for checking the dictionary status, to pull only updated data. For more details, see ClickHouse documentation on dictionaries.
sslMode
: SslMode
Mode of SSL TCP/IP connection to the PostgreSQL host. For more details, see PostgreSQL documentation.
Structure
Attribute
name
: string
Name of the column.
type
: string
Type of the column.
nullValue
: string
Default value for an element without data (for example, an empty string).
expression
: string
Expression, describing the attribute, if applicable.
hierarchical
: bool
Indication of hierarchy support.
Default value: false.
injective
: bool
Indication of injective mapping "id -> attribute".
Default value: false.
Id
Numeric key.
name
: string
Name of the numeric key.
Key
Complex key.
attributes
: Attribute
Attributes of a complex key.
id
: Id
Single numeric key column for the dictionary.
key
: Key
Composite key for the dictionary, consisting of one or more key columns. For details, see the ClickHouse documentation.
rangeMin
: Attribute
Field holding the beginning of the range for dictionaries with RANGE_HASHED layout. For details, see the ClickHouse documentation.
rangeMax
: Attribute
Field holding the end of the range for dictionaries with RANGE_HASHED layout. For details, see the ClickHouse documentation.
attributes
: Attribute
Description of the fields available for database queries. For details, see ClickHouse documentation.
Layout
Layout determining how to store the dictionary in memory.
Type
TYPE_UNSPECIFIED
FLAT
The entire dictionary is stored in memory in the form of flat arrays. Available for all dictionary sources.
HASHED
The entire dictionary is stored in memory in the form of a hash table. Available for all dictionary sources.
COMPLEX_KEY_HASHED
Similar to HASHED, to be used with composite keys. Available for all dictionary sources.
RANGE_HASHED
The entire dictionary is stored in memory in the form of a hash table, with an ordered array of ranges and their corresponding values. Available for all dictionary sources.
CACHE
The dictionary is stored in a cache with a set number of cells. Available for MySQL, ClickHouse and HTTP dictionary sources.
COMPLEX_KEY_CACHE
Similar to CACHE, to be used with composite keys. Available for MySQL, ClickHouse and HTTP dictionary sources.
type
: Type
Layout type for an external dictionary.
sizeInCells
: int64
Number of cells in the cache. Rounded up to a power of two. Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
maxArraySize
: int64
Maximum dictionary key size. Applicable only for FLAT layout type.
Range
min
: int64
Minimum dictionary lifetime.
max
: int64
Maximum dictionary lifetime.
name
: string
Name of the external dictionary.
structure
: Structure
Set of attributes for the external dictionary. For in-depth description, see ClickHouse documentation.
layout
: Layout
Layout for storing the dictionary in memory. For in-depth description, see ClickHouse documentation.
fixedLifetime
: int64
Fixed interval between dictionary updates.
lifetimeRange
: Range
Range of intervals between dictionary updates for ClickHouse to choose from.
httpSource
: HttpSource
HTTP source for the dictionary.
mysqlSource
: MysqlSource
MySQL source for the dictionary.
clickhouseSource
: ClickhouseSource
ClickHouse source for the dictionary.
mongodbSource
: MongodbSource
MongoDB source for the dictionary.
postgresqlSource
: PostgresqlSource
PostgreSQL source for the dictionary.
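fixedLifetime and lifetimeRange appear to be alternative ways of setting the update interval, and the five *Source fields are alternative source types; the sketch below picks one of each. It describes a flat-layout dictionary fed from an HTTP source, using the Layout_Type enum imported at the top of the page (the name, URL, and columns are hypothetical):
const dictionary = {
  name: "regions_dict",                      // hypothetical dictionary name
  structure: {
    id: { name: "region_id" },               // single numeric key column
    attributes: [
      { name: "region_name", type: "String", nullValue: "" },
    ],
  },
  layout: { type: Layout_Type.FLAT },
  fixedLifetime: 300,                        // refresh every 300 seconds
  httpSource: {
    url: "https://example.com/regions.tsv",  // placeholder URL
    format: "TabSeparated",
  },
};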
GraphiteRollup
Rollup settings for the GraphiteMergeTree table engine.
Pattern
Retention
age
: int64
Minimum age of the data in seconds.
precision
: int64
Precision of determining the age of the data, in seconds.
regexp
: string
Pattern for metric names.
function
: string
Name of the aggregating function to apply to data of the age specified in retention.
retention
: Retention
Age of data to use for thinning.
name
: string
Name for the specified combination of settings for Graphite rollup.
patterns
: Pattern
Pattern to use for the rollup.
pathColumnName
: string
The name of the column storing the metric name (Graphite sensor). Default: Path. See the in-depth description in the ClickHouse documentation.
timeColumnName
: string
The name of the column storing the time of measuring the metric. Default: Time. See the in-depth description in the ClickHouse documentation.
valueColumnName
: string
The name of the column storing the value of the metric at the time set in time_column_name. Default: Value. See the in-depth description in the ClickHouse documentation.
versionColumnName
: string
The name of the column storing the version of the metric. Default: Timestamp. See the in-depth description in the ClickHouse documentation.
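A sketch tying these pieces together: one rollup combination with a single pattern that averages matching metrics and keeps day-old data at one-minute precision (the name, regexp, and numbers are illustrative):
const graphiteRollup = {
  name: "default_rollup",             // hypothetical combination name
  patterns: [{
    regexp: "^app\\.",                // metric names starting with "app."
    function: "avg",                  // aggregating function
    retention: [
      { age: 86400, precision: 60 },  // data older than a day: 60-second precision
    ],
  }],
};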
KafkaTopic
name
: string
settings
: Kafka
Rabbitmq
username
: string
RabbitMQ username.
password
: string
RabbitMQ password.
vhost
: string
RabbitMQ virtual host.
QueryMaskingRule
name
: string
Name for the rule.
regexp
: string
RE2-compatible regular expression. Required.
replace
: string
Substitution string for sensitive data. Default: six asterisks.
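As a sketch, a rule that masks anything resembling an email address before it reaches the logs (the rule name and regular expression are hypothetical; replace is omitted so the default six asterisks apply):
const queryMaskingRules = [{
  name: "hide-emails",                        // hypothetical rule name
  regexp: "[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+", // RE2-compatible pattern
  // replace omitted: defaults to six asterisks
}];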
QueryCache
maxSizeInBytes
: google.protobuf.Int64Value
The maximum cache size in bytes. Default: 1073741824 (1 GiB)
maxEntries
: google.protobuf.Int64Value
The maximum number of SELECT query results stored in the cache. Default: 1024
maxEntrySizeInBytes
: google.protobuf.Int64Value
The maximum size in bytes that a SELECT query result may have in order to be saved in the cache. Default: 1048576 (1 MiB).
maxEntrySizeInRows
: google.protobuf.Int64Value
The maximum number of rows that a SELECT query result may have in order to be saved in the cache. Default: 30000000 (30 million).
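All four fields are Int64Value wrappers. A sketch that simply restates the defaults listed above in fromPartial form:
const queryCache = {
  maxSizeInBytes: { value: 1073741824 },   // 1 GiB
  maxEntries: { value: 1024 },
  maxEntrySizeInBytes: { value: 1048576 }, // 1 MiB
  maxEntrySizeInRows: { value: 30000000 },
};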
Operation
An Operation resource. For more information, see Operation.
id
: string
ID of the operation.
description
: string
Description of the operation. 0-256 characters long.
createdAt
: google.protobuf.Timestamp
Creation timestamp.
createdBy
: string
ID of the user or service account who initiated the operation.
modifiedAt
: google.protobuf.Timestamp
The time when the Operation resource was last modified.
done
: bool
If the value is false, it means the operation is still in progress.
If true, the operation is completed, and either error or response is available.
metadata
: google.protobuf.Any
Service-specific metadata associated with the operation. It typically contains the ID of the target resource that the operation is performed on. Any method that returns a long-running operation should document the metadata type, if any.
One of result
The operation result.
If done == false and there was no failure detected, neither error nor response is set.
If done == false and there was a failure detected, error is set.
If done == true, exactly one of error or response is set.
error
: google.rpc.Status
The error result of the operation in case of failure or cancellation.
response
: google.protobuf.Any
The normal response of the operation in case of success.
If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is the standard Create/Update, the response should be the target resource of the operation. Any method that returns a long-running operation should document the response type, if any.
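Putting the done/error/response semantics to work: a sketch that resolves the restore operation with the waitForOperation and decodeMessage helpers already imported in the example at the top of this page, reusing its session and operation objects (logging the decoded response without a type annotation is a simplification of the SDK's usual pattern):
const finished = await waitForOperation(operation, session);
if (finished.error) {
  // google.rpc.Status: numeric code plus a human-readable message
  console.error(finished.error.code, finished.error.message);
} else if (finished.response) {
  // google.protobuf.Any holding the target resource; decodeMessage unpacks it
  console.log(decodeMessage(finished.response));
}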