AddShard
Creates a new shard.
- TypeScript
import {
cloudApi,
decodeMessage,
serviceClients,
Session,
waitForOperation,
} from "@yandex-cloud/nodejs-sdk";
const AddClusterShardRequest =
cloudApi.mdb.clickhouse_cluster_service.AddClusterShardRequest;
const ClickhouseConfig_LogLevel =
cloudApi.mdb.clickhouse_config_clickhouse.ClickhouseConfig_LogLevel;
const Compression_Method =
cloudApi.mdb.clickhouse_config_clickhouse.Compression_Method;
const Host_Type = cloudApi.mdb.clickhouse_cluster.Host_Type;
const Kafka_AutoOffsetReset =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_AutoOffsetReset;
const Kafka_Debug = cloudApi.mdb.clickhouse_config_clickhouse.Kafka_Debug;
const Kafka_SaslMechanism =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_SaslMechanism;
const Kafka_SecurityProtocol =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_SecurityProtocol;
const Layout_Type = cloudApi.mdb.clickhouse_config_clickhouse.Layout_Type;
const PostgresqlSource_SslMode =
cloudApi.mdb.clickhouse_config_clickhouse.PostgresqlSource_SslMode;
const Shard = cloudApi.mdb.clickhouse_cluster.Shard;
(async () => {
const authToken = process.env["YC_OAUTH_TOKEN"];
const session = new Session({ oauthToken: authToken });
const client = session.client(serviceClients.ClusterServiceClient);
const operation = await client.addShard(
AddClusterShardRequest.fromPartial({
clusterId: "clusterId",
shardName: "shardName",
// configSpec: {
// clickhouse: {
// config: {
// logLevel: ClickhouseConfig_LogLevel.TRACE,
// mergeTree: {
// replicatedDeduplicationWindow: {
// value: 0
// },
// replicatedDeduplicationWindowSeconds: {
// value: 0
// },
// partsToDelayInsert: {
// value: 0
// },
// partsToThrowInsert: {
// value: 0
// },
// inactivePartsToDelayInsert: {
// value: 0
// },
// inactivePartsToThrowInsert: {
// value: 0
// },
// maxReplicatedMergesInQueue: {
// value: 0
// },
// numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: {
// value: 0
// },
// maxBytesToMergeAtMinSpaceInPool: {
// value: 0
// },
// maxBytesToMergeAtMaxSpaceInPool: {
// value: 0
// },
// minBytesForWidePart: {
// value: 0
// },
// minRowsForWidePart: {
// value: 0
// },
// ttlOnlyDropParts: {
// value: true
// },
// allowRemoteFsZeroCopyReplication: {
// value: true
// },
// mergeWithTtlTimeout: {
// value: 0
// },
// mergeWithRecompressionTtlTimeout: {
// value: 0
// },
// maxPartsInTotal: {
// value: 0
// },
// maxNumberOfMergesWithTtlInPool: {
// value: 0
// },
// cleanupDelayPeriod: {
// value: 0
// },
// numberOfFreeEntriesInPoolToExecuteMutation: {
// value: 0
// },
// maxAvgPartSizeForTooManyParts: {
// value: 0
// },
// minAgeToForceMergeSeconds: {
// value: 0
// },
// minAgeToForceMergeOnPartitionOnly: {
// value: true
// },
// mergeSelectingSleepMs: {
// value: 0
// },
// mergeMaxBlockSize: {
// value: 0
// },
// checkSampleColumnIsCorrect: {
// value: true
// },
// maxMergeSelectingSleepMs: {
// value: 0
// },
// maxCleanupDelayPeriod: {
// value: 0
// }
// },
// compression: [{
// method: Compression_Method.LZ4,
// minPartSize: 0,
// minPartSizeRatio: 0,
// level: {
// value: 0
// }
// }],
// dictionaries: [{
// name: "name",
// structure: {
// id: {
// name: "name"
// },
// key: {
// attributes: [{
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// }]
// },
// rangeMin: {
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// },
// rangeMax: {
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// },
// attributes: [{
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// }]
// },
// layout: {
// type: Layout_Type.FLAT,
// sizeInCells: 0,
// maxArraySize: 0
// },
// fixedLifetime: 0,
// lifetimeRange: {
// min: 0,
// max: 0
// },
// httpSource: {
// url: "url",
// format: "format",
// headers: [{
// name: "name",
// value: "value"
// }]
// },
// mysqlSource: {
// db: "db",
// table: "table",
// port: 0,
// user: "user",
// password: "password",
// replicas: [{
// host: "host",
// priority: 0,
// port: 0,
// user: "user",
// password: "password"
// }],
// where: "where",
// invalidateQuery: "invalidateQuery",
// closeConnection: {
// value: true
// },
// shareConnection: {
// value: true
// }
// },
// clickhouseSource: {
// db: "db",
// table: "table",
// host: "host",
// port: 0,
// user: "user",
// password: "password",
// where: "where",
// secure: {
// value: true
// }
// },
// mongodbSource: {
// db: "db",
// collection: "collection",
// host: "host",
// port: 0,
// user: "user",
// password: "password",
// options: "options"
// },
// postgresqlSource: {
// db: "db",
// table: "table",
// hosts: ["hosts"],
// port: 0,
// user: "user",
// password: "password",
// invalidateQuery: "invalidateQuery",
// sslMode: PostgresqlSource_SslMode.DISABLE
// }
// }],
// graphiteRollup: [{
// name: "name",
// patterns: [{
// regexp: "regexp",
// function: "function",
// retention: [{
// age: 0,
// precision: 0
// }]
// }],
// pathColumnName: "pathColumnName",
// timeColumnName: "timeColumnName",
// valueColumnName: "valueColumnName",
// versionColumnName: "versionColumnName"
// }],
// kafka: {
// securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
// saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_GSSAPI,
// saslUsername: "saslUsername",
// saslPassword: "saslPassword",
// enableSslCertificateVerification: {
// value: true
// },
// maxPollIntervalMs: {
// value: 0
// },
// sessionTimeoutMs: {
// value: 0
// },
// debug: Kafka_Debug.DEBUG_GENERIC,
// autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
// },
// kafkaTopics: [{
// name: "name",
// settings: {
// securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
// saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_GSSAPI,
// saslUsername: "saslUsername",
// saslPassword: "saslPassword",
// enableSslCertificateVerification: {
// value: true
// },
// maxPollIntervalMs: {
// value: 0
// },
// sessionTimeoutMs: {
// value: 0
// },
// debug: Kafka_Debug.DEBUG_GENERIC,
// autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
// }
// }],
// rabbitmq: {
// username: "username",
// password: "password",
// vhost: "vhost"
// },
// maxConnections: {
// value: 0
// },
// maxConcurrentQueries: {
// value: 0
// },
// keepAliveTimeout: {
// value: 0
// },
// uncompressedCacheSize: {
// value: 0
// },
// markCacheSize: {
// value: 0
// },
// maxTableSizeToDrop: {
// value: 0
// },
// maxPartitionSizeToDrop: {
// value: 0
// },
// builtinDictionariesReloadInterval: {
// value: 0
// },
// timezone: "timezone",
// geobaseEnabled: {
// value: true
// },
// geobaseUri: "geobaseUri",
// queryLogRetentionSize: {
// value: 0
// },
// queryLogRetentionTime: {
// value: 0
// },
// queryThreadLogEnabled: {
// value: true
// },
// queryThreadLogRetentionSize: {
// value: 0
// },
// queryThreadLogRetentionTime: {
// value: 0
// },
// partLogRetentionSize: {
// value: 0
// },
// partLogRetentionTime: {
// value: 0
// },
// metricLogEnabled: {
// value: true
// },
// metricLogRetentionSize: {
// value: 0
// },
// metricLogRetentionTime: {
// value: 0
// },
// traceLogEnabled: {
// value: true
// },
// traceLogRetentionSize: {
// value: 0
// },
// traceLogRetentionTime: {
// value: 0
// },
// textLogEnabled: {
// value: true
// },
// textLogRetentionSize: {
// value: 0
// },
// textLogRetentionTime: {
// value: 0
// },
// textLogLevel: ClickhouseConfig_LogLevel.TRACE,
// opentelemetrySpanLogEnabled: {
// value: true
// },
// opentelemetrySpanLogRetentionSize: {
// value: 0
// },
// opentelemetrySpanLogRetentionTime: {
// value: 0
// },
// queryViewsLogEnabled: {
// value: true
// },
// queryViewsLogRetentionSize: {
// value: 0
// },
// queryViewsLogRetentionTime: {
// value: 0
// },
// asynchronousMetricLogEnabled: {
// value: true
// },
// asynchronousMetricLogRetentionSize: {
// value: 0
// },
// asynchronousMetricLogRetentionTime: {
// value: 0
// },
// sessionLogEnabled: {
// value: true
// },
// sessionLogRetentionSize: {
// value: 0
// },
// sessionLogRetentionTime: {
// value: 0
// },
// zookeeperLogEnabled: {
// value: true
// },
// zookeeperLogRetentionSize: {
// value: 0
// },
// zookeeperLogRetentionTime: {
// value: 0
// },
// asynchronousInsertLogEnabled: {
// value: true
// },
// asynchronousInsertLogRetentionSize: {
// value: 0
// },
// asynchronousInsertLogRetentionTime: {
// value: 0
// },
// backgroundPoolSize: {
// value: 0
// },
// backgroundMergesMutationsConcurrencyRatio: {
// value: 0
// },
// backgroundSchedulePoolSize: {
// value: 0
// },
// backgroundFetchesPoolSize: {
// value: 0
// },
// backgroundMovePoolSize: {
// value: 0
// },
// backgroundDistributedSchedulePoolSize: {
// value: 0
// },
// backgroundBufferFlushSchedulePoolSize: {
// value: 0
// },
// backgroundMessageBrokerSchedulePoolSize: {
// value: 0
// },
// backgroundCommonPoolSize: {
// value: 0
// },
// defaultDatabase: {
// value: "value"
// },
// totalMemoryProfilerStep: {
// value: 0
// },
// totalMemoryTrackerSampleProbability: {
// value: 0
// },
// queryMaskingRules: [{
// name: "name",
// regexp: "regexp",
// replace: "replace"
// }],
// dictionariesLazyLoad: {
// value: true
// },
// queryCache: {
// maxSizeInBytes: {
// value: 0
// },
// maxEntries: {
// value: 0
// },
// maxEntrySizeInBytes: {
// value: 0
// },
// maxEntrySizeInRows: {
// value: 0
// }
// }
// },
// resources: {
// resourcePresetId: "resourcePresetId",
// diskSize: 0,
// diskTypeId: "diskTypeId"
// },
// weight: {
// value: 0
// }
// }
// },
// hostSpecs: [{
// zoneId: "zoneId",
// type: Host_Type.CLICKHOUSE,
// subnetId: "subnetId",
// assignPublicIp: true,
// shardName: "shardName"
// }],
// copySchema: {
// value: true
// }
})
);
const finishedOp = await waitForOperation(operation, session);
if (finishedOp.response) {
const result = decodeMessage<typeof Shard>(finishedOp.response);
console.log(result);
}
})();
- Python
import os
import grpc
import yandexcloud
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import AddClusterShardMetadata
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import AddClusterShardRequest
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2_grpc import ClusterServiceStub
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import HostSpec
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Resources
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Shard
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import ShardConfigSpec
# Wrapper types used by the commented-out config_spec example below.
from google.protobuf.wrappers_pb2 import BoolValue, DoubleValue, Int64Value, StringValue
token = os.getenv("YC_OAUTH_TOKEN")
sdk = yandexcloud.SDK(token=token)
service = sdk.client(ClusterServiceStub)
operation = service.AddShard(
AddClusterShardRequest(
cluster_id="clusterId",
shard_name="shardName",
# config_spec = ShardConfigSpec(
# clickhouse = ShardConfigSpec.Clickhouse(
# config = ClickhouseConfig(
# log_level = ClickhouseConfig.LogLevel.TRACE,
# merge_tree = ClickhouseConfig.MergeTree(
# replicated_deduplication_window = Int64Value(
# value = 0
# ),
# replicated_deduplication_window_seconds = Int64Value(
# value = 0
# ),
# parts_to_delay_insert = Int64Value(
# value = 0
# ),
# parts_to_throw_insert = Int64Value(
# value = 0
# ),
# inactive_parts_to_delay_insert = Int64Value(
# value = 0
# ),
# inactive_parts_to_throw_insert = Int64Value(
# value = 0
# ),
# max_replicated_merges_in_queue = Int64Value(
# value = 0
# ),
# number_of_free_entries_in_pool_to_lower_max_size_of_merge = Int64Value(
# value = 0
# ),
# max_bytes_to_merge_at_min_space_in_pool = Int64Value(
# value = 0
# ),
# max_bytes_to_merge_at_max_space_in_pool = Int64Value(
# value = 0
# ),
# min_bytes_for_wide_part = Int64Value(
# value = 0
# ),
# min_rows_for_wide_part = Int64Value(
# value = 0
# ),
# ttl_only_drop_parts = BoolValue(
# value = True
# ),
# allow_remote_fs_zero_copy_replication = BoolValue(
# value = True
# ),
# merge_with_ttl_timeout = Int64Value(
# value = 0
# ),
# merge_with_recompression_ttl_timeout = Int64Value(
# value = 0
# ),
# max_parts_in_total = Int64Value(
# value = 0
# ),
# max_number_of_merges_with_ttl_in_pool = Int64Value(
# value = 0
# ),
# cleanup_delay_period = Int64Value(
# value = 0
# ),
# number_of_free_entries_in_pool_to_execute_mutation = Int64Value(
# value = 0
# ),
# max_avg_part_size_for_too_many_parts = Int64Value(
# value = 0
# ),
# min_age_to_force_merge_seconds = Int64Value(
# value = 0
# ),
# min_age_to_force_merge_on_partition_only = BoolValue(
# value = True
# ),
# merge_selecting_sleep_ms = Int64Value(
# value = 0
# ),
# merge_max_block_size = Int64Value(
# value = 0
# ),
# check_sample_column_is_correct = BoolValue(
# value = True
# ),
# max_merge_selecting_sleep_ms = Int64Value(
# value = 0
# ),
# max_cleanup_delay_period = Int64Value(
# value = 0
# )
# ),
# compression = [ClickhouseConfig.Compression(
# method = Compression.Method.LZ4,
# min_part_size = 0,
# min_part_size_ratio = 0,
# level = Int64Value(
# value = 0
# )
# )],
# dictionaries = [ClickhouseConfig.ExternalDictionary(
# name = "name",
# structure = ExternalDictionary.Structure(
# id = Structure.Id(
# name = "name"
# ),
# key = Structure.Key(
# attributes = [Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# )]
# ),
# range_min = Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# ),
# range_max = Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# ),
# attributes = [Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# )]
# ),
# layout = ExternalDictionary.Layout(
# type = Layout.Type.FLAT,
# size_in_cells = 0,
# max_array_size = 0
# ),
# fixed_lifetime = 0,
# lifetime_range = ExternalDictionary.Range(
# min = 0,
# max = 0
# ),
# http_source = ExternalDictionary.HttpSource(
# url = "url",
# format = "format",
# headers = [HttpSource.Header(
# name = "name",
# value = "value"
# )]
# ),
# mysql_source = ExternalDictionary.MysqlSource(
# db = "db",
# table = "table",
# port = 0,
# user = "user",
# password = "password",
# replicas = [MysqlSource.Replica(
# host = "host",
# priority = 0,
# port = 0,
# user = "user",
# password = "password"
# )],
# where = "where",
# invalidate_query = "invalidateQuery",
# close_connection = BoolValue(
# value = True
# ),
# share_connection = BoolValue(
# value = True
# )
# ),
# clickhouse_source = ExternalDictionary.ClickhouseSource(
# db = "db",
# table = "table",
# host = "host",
# port = 0,
# user = "user",
# password = "password",
# where = "where",
# secure = BoolValue(
# value = True
# )
# ),
# mongodb_source = ExternalDictionary.MongodbSource(
# db = "db",
# collection = "collection",
# host = "host",
# port = 0,
# user = "user",
# password = "password",
# options = "options"
# ),
# postgresql_source = ExternalDictionary.PostgresqlSource(
# db = "db",
# table = "table",
# hosts = ["hosts"],
# port = 0,
# user = "user",
# password = "password",
# invalidate_query = "invalidateQuery",
# ssl_mode = PostgresqlSource.SslMode.DISABLE
# )
# )],
# graphite_rollup = [ClickhouseConfig.GraphiteRollup(
# name = "name",
# patterns = [GraphiteRollup.Pattern(
# regexp = "regexp",
# function = "function",
# retention = [Pattern.Retention(
# age = 0,
# precision = 0
# )]
# )],
# path_column_name = "pathColumnName",
# time_column_name = "timeColumnName",
# value_column_name = "valueColumnName",
# version_column_name = "versionColumnName"
# )],
# kafka = ClickhouseConfig.Kafka(
# security_protocol = Kafka.SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
# sasl_mechanism = Kafka.SaslMechanism.SASL_MECHANISM_GSSAPI,
# sasl_username = "saslUsername",
# sasl_password = "saslPassword",
# enable_ssl_certificate_verification = BoolValue(
# value = True
# ),
# max_poll_interval_ms = Int64Value(
# value = 0
# ),
# session_timeout_ms = Int64Value(
# value = 0
# ),
# debug = Kafka.Debug.DEBUG_GENERIC,
# auto_offset_reset = Kafka.AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
# ),
# kafka_topics = [ClickhouseConfig.KafkaTopic(
# name = "name",
# settings = ClickhouseConfig.Kafka(
# security_protocol = Kafka.SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
# sasl_mechanism = Kafka.SaslMechanism.SASL_MECHANISM_GSSAPI,
# sasl_username = "saslUsername",
# sasl_password = "saslPassword",
# enable_ssl_certificate_verification = BoolValue(
# value = True
# ),
# max_poll_interval_ms = Int64Value(
# value = 0
# ),
# session_timeout_ms = Int64Value(
# value = 0
# ),
# debug = Kafka.Debug.DEBUG_GENERIC,
# auto_offset_reset = Kafka.AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
# )
# )],
# rabbitmq = ClickhouseConfig.Rabbitmq(
# username = "username",
# password = "password",
# vhost = "vhost"
# ),
# max_connections = Int64Value(
# value = 0
# ),
# max_concurrent_queries = Int64Value(
# value = 0
# ),
# keep_alive_timeout = Int64Value(
# value = 0
# ),
# uncompressed_cache_size = Int64Value(
# value = 0
# ),
# mark_cache_size = Int64Value(
# value = 0
# ),
# max_table_size_to_drop = Int64Value(
# value = 0
# ),
# max_partition_size_to_drop = Int64Value(
# value = 0
# ),
# builtin_dictionaries_reload_interval = Int64Value(
# value = 0
# ),
# timezone = "timezone",
# geobase_enabled = BoolValue(
# value = True
# ),
# geobase_uri = "geobaseUri",
# query_log_retention_size = Int64Value(
# value = 0
# ),
# query_log_retention_time = Int64Value(
# value = 0
# ),
# query_thread_log_enabled = BoolValue(
# value = True
# ),
# query_thread_log_retention_size = Int64Value(
# value = 0
# ),
# query_thread_log_retention_time = Int64Value(
# value = 0
# ),
# part_log_retention_size = Int64Value(
# value = 0
# ),
# part_log_retention_time = Int64Value(
# value = 0
# ),
# metric_log_enabled = BoolValue(
# value = True
# ),
# metric_log_retention_size = Int64Value(
# value = 0
# ),
# metric_log_retention_time = Int64Value(
# value = 0
# ),
# trace_log_enabled = BoolValue(
# value = True
# ),
# trace_log_retention_size = Int64Value(
# value = 0
# ),
# trace_log_retention_time = Int64Value(
# value = 0
# ),
# text_log_enabled = BoolValue(
# value = True
# ),
# text_log_retention_size = Int64Value(
# value = 0
# ),
# text_log_retention_time = Int64Value(
# value = 0
# ),
# text_log_level = ClickhouseConfig.LogLevel.TRACE,
# opentelemetry_span_log_enabled = BoolValue(
# value = True
# ),
# opentelemetry_span_log_retention_size = Int64Value(
# value = 0
# ),
# opentelemetry_span_log_retention_time = Int64Value(
# value = 0
# ),
# query_views_log_enabled = BoolValue(
# value = True
# ),
# query_views_log_retention_size = Int64Value(
# value = 0
# ),
# query_views_log_retention_time = Int64Value(
# value = 0
# ),
# asynchronous_metric_log_enabled = BoolValue(
# value = True
# ),
# asynchronous_metric_log_retention_size = Int64Value(
# value = 0
# ),
# asynchronous_metric_log_retention_time = Int64Value(
# value = 0
# ),
# session_log_enabled = BoolValue(
# value = True
# ),
# session_log_retention_size = Int64Value(
# value = 0
# ),
# session_log_retention_time = Int64Value(
# value = 0
# ),
# zookeeper_log_enabled = BoolValue(
# value = True
# ),
# zookeeper_log_retention_size = Int64Value(
# value = 0
# ),
# zookeeper_log_retention_time = Int64Value(
# value = 0
# ),
# asynchronous_insert_log_enabled = BoolValue(
# value = True
# ),
# asynchronous_insert_log_retention_size = Int64Value(
# value = 0
# ),
# asynchronous_insert_log_retention_time = Int64Value(
# value = 0
# ),
# background_pool_size = Int64Value(
# value = 0
# ),
# background_merges_mutations_concurrency_ratio = Int64Value(
# value = 0
# ),
# background_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_fetches_pool_size = Int64Value(
# value = 0
# ),
# background_move_pool_size = Int64Value(
# value = 0
# ),
# background_distributed_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_buffer_flush_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_message_broker_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_common_pool_size = Int64Value(
# value = 0
# ),
# default_database = StringValue(
# value = "value"
# ),
# total_memory_profiler_step = Int64Value(
# value = 0
# ),
# total_memory_tracker_sample_probability = DoubleValue(
# value = 0
# ),
# query_masking_rules = [ClickhouseConfig.QueryMaskingRule(
# name = "name",
# regexp = "regexp",
# replace = "replace"
# )],
# dictionaries_lazy_load = BoolValue(
# value = True
# ),
# query_cache = ClickhouseConfig.QueryCache(
# max_size_in_bytes = Int64Value(
# value = 0
# ),
# max_entries = Int64Value(
# value = 0
# ),
# max_entry_size_in_bytes = Int64Value(
# value = 0
# ),
# max_entry_size_in_rows = Int64Value(
# value = 0
# )
# )
# ),
# resources = Resources(
# resource_preset_id = "resourcePresetId",
# disk_size = 0,
# disk_type_id = "diskTypeId"
# ),
# weight = Int64Value(
# value = 0
# )
# )
# ),
# host_specs = [HostSpec(
# zone_id = "zoneId",
# type = Host.Type.CLICKHOUSE,
# subnet_id = "subnetId",
# assign_public_ip = True,
# shard_name = "shardName"
# )],
# copy_schema = BoolValue(
# value = True
# )
)
)
operation_result = sdk.wait_operation_and_get_result(
operation,
response_type=Shard,
meta_type=AddClusterShardMetadata,
)
print(operation_result)
AddClusterShardRequest
clusterId : string
ID of the ClickHouse cluster to create a shard in. To get the cluster ID, use a ClusterService.List request.
shardName : string
Name of the shard. The name must be unique within the cluster.
hostSpecs : HostSpec
Configurations for ClickHouse hosts that should be created with the shard. Must contain at least one element.
HostSpec
zoneId : string
ID of the availability zone where the host resides. To get a list of available zones, use the yandex.cloud.compute.v1.ZoneService.List request.
type : Host.Type
Type of the host to be deployed: CLICKHOUSE or ZOOKEEPER.
subnetId : string
ID of the subnet that the host should belong to. This subnet should be a part of the network that the cluster belongs to. The ID of the network is set in the field Cluster.network_id.
assignPublicIp : bool
Whether the host should get a public IP address on creation.
Possible values:
- false - don't assign a public IP to the host.
- true - the host should have a public IP address.
shardName : string
Name of the ClickHouse shard the host belongs to. To get the shard name, use a ClusterService.ListShards request.
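To make the request shape concrete, here is a minimal sketch of an AddShard call that sets only the fields documented above. It reuses the service, AddClusterShardRequest, and HostSpec names from the Python sample; the cluster ID, zone, and subnet values are placeholders.
# Minimal AddShard request: required fields only, no config_spec overrides.
# "clusterId", "ru-central1-a", and "subnetId" are placeholder values.
operation = service.AddShard(
    AddClusterShardRequest(
        cluster_id="clusterId",
        shard_name="shard2",
        host_specs=[
            HostSpec(
                zone_id="ru-central1-a",
                subnet_id="subnetId",
                assign_public_ip=False,
                shard_name="shard2",  # must match the shard being created
            ),
        ],
    )
)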
Operation
An Operation resource. For more information, see Operation.
id : string
ID of the operation.
description : string
Description of the operation. 0-256 characters long.
createdAt : google.protobuf.Timestamp
Creation timestamp.
createdBy : string
ID of the user or service account who initiated the operation.
modifiedAt : google.protobuf.Timestamp
The time when the Operation resource was last modified.
done : bool
If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
metadata : google.protobuf.Any
Service-specific metadata associated with the operation. It typically contains the ID of the target resource that the operation is performed on. Any method that returns a long-running operation should document the metadata type, if any.
One of result
The operation result.
If done == false and there was no failure detected, neither error nor response is set.
If done == false and there was a failure detected, error is set.
If done == true, exactly one of error or response is set.
error : google.rpc.Status
The error result of the operation in case of failure or cancellation.
response : google.protobuf.Any
The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is the standard Create/Update, the response should be the target resource of the operation. Any method that returns a long-running operation should document the response type, if any.
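Both samples above rely on SDK helpers (waitForOperation, wait_operation_and_get_result) to block until done becomes true. As a minimal sketch of the same check done by hand, assuming the sdk session, operation, and Shard names from the Python sample:
import time

from yandex.cloud.operation.operation_service_pb2 import GetOperationRequest
from yandex.cloud.operation.operation_service_pb2_grpc import OperationServiceStub

operation_service = sdk.client(OperationServiceStub)

# Poll until done flips to true; then exactly one of error or response is set.
while not operation.done:
    time.sleep(1)
    operation = operation_service.Get(GetOperationRequest(operation_id=operation.id))

if operation.HasField("error"):
    print(operation.error)  # google.rpc.Status: code, message, details
else:
    shard = Shard()
    operation.response.Unpack(shard)  # response is a google.protobuf.Any
    print(shard)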