Restore
Creates a new MySQL cluster using the specified backup.
- TypeScript
import {
cloudApi,
decodeMessage,
serviceClients,
Session,
waitForOperation,
} from "@yandex-cloud/nodejs-sdk";
const ClickhouseConfig_LogLevel =
cloudApi.mdb.clickhouse_config_clickhouse.ClickhouseConfig_LogLevel;
const Cluster = cloudApi.mdb.clickhouse_cluster.Cluster;
const Cluster_Environment = cloudApi.mdb.clickhouse_cluster.Cluster_Environment;
const Compression_Method =
cloudApi.mdb.clickhouse_config_clickhouse.Compression_Method;
const Host_Type = cloudApi.mdb.clickhouse_cluster.Host_Type;
const Kafka_AutoOffsetReset =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_AutoOffsetReset;
const Kafka_Debug = cloudApi.mdb.clickhouse_config_clickhouse.Kafka_Debug;
const Kafka_SaslMechanism =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_SaslMechanism;
const Kafka_SecurityProtocol =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_SecurityProtocol;
const Layout_Type = cloudApi.mdb.clickhouse_config_clickhouse.Layout_Type;
const PostgresqlSource_SslMode =
cloudApi.mdb.clickhouse_config_clickhouse.PostgresqlSource_SslMode;
const RestoreClusterRequest =
cloudApi.mdb.clickhouse_cluster_service.RestoreClusterRequest;
(async () => {
const authToken = process.env["YC_OAUTH_TOKEN"];
const session = new Session({ oauthToken: authToken });
const client = session.client(serviceClients.ClusterServiceClient);
const operation = await client.restore(
RestoreClusterRequest.fromPartial({
backupId: "backupId",
// additionalBackupIds: ["additionalBackupIds"],
name: "name",
// description: "description",
// labels: {"key": "labels"},
environment: Cluster_Environment.PRODUCTION,
configSpec: {
// version: "version",
// clickhouse: {
// config: {
// logLevel: ClickhouseConfig_LogLevel.TRACE,
// mergeTree: {
// replicatedDeduplicationWindow: {
// value: 0
// },
// replicatedDeduplicationWindowSeconds: {
// value: 0
// },
// partsToDelayInsert: {
// value: 0
// },
// partsToThrowInsert: {
// value: 0
// },
// inactivePartsToDelayInsert: {
// value: 0
// },
// inactivePartsToThrowInsert: {
// value: 0
// },
// maxReplicatedMergesInQueue: {
// value: 0
// },
// numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: {
// value: 0
// },
// maxBytesToMergeAtMinSpaceInPool: {
// value: 0
// },
// maxBytesToMergeAtMaxSpaceInPool: {
// value: 0
// },
// minBytesForWidePart: {
// value: 0
// },
// minRowsForWidePart: {
// value: 0
// },
// ttlOnlyDropParts: {
// value: true
// },
// allowRemoteFsZeroCopyReplication: {
// value: true
// },
// mergeWithTtlTimeout: {
// value: 0
// },
// mergeWithRecompressionTtlTimeout: {
// value: 0
// },
// maxPartsInTotal: {
// value: 0
// },
// maxNumberOfMergesWithTtlInPool: {
// value: 0
// },
// cleanupDelayPeriod: {
// value: 0
// },
// numberOfFreeEntriesInPoolToExecuteMutation: {
// value: 0
// },
// maxAvgPartSizeForTooManyParts: {
// value: 0
// },
// minAgeToForceMergeSeconds: {
// value: 0
// },
// minAgeToForceMergeOnPartitionOnly: {
// value: true
// },
// mergeSelectingSleepMs: {
// value: 0
// },
// mergeMaxBlockSize: {
// value: 0
// },
// checkSampleColumnIsCorrect: {
// value: true
// },
// maxMergeSelectingSleepMs: {
// value: 0
// },
// maxCleanupDelayPeriod: {
// value: 0
// }
// },
// compression: [{
// method: Compression_Method.LZ4,
// minPartSize: 0,
// minPartSizeRatio: 0,
// level: {
// value: 0
// }
// }],
// dictionaries: [{
// name: "name",
// structure: {
// id: {
// name: "name"
// },
// key: {
// attributes: [{
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// }]
// },
// rangeMin: {
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// },
// rangeMax: {
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// },
// attributes: [{
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// }]
// },
// layout: {
// type: Layout_Type.FLAT,
// sizeInCells: 0,
// maxArraySize: 0
// },
// fixedLifetime: 0,
// lifetimeRange: {
// min: 0,
// max: 0
// },
// httpSource: {
// url: "url",
// format: "format",
// headers: [{
// name: "name",
// value: "value"
// }]
// },
// mysqlSource: {
// db: "db",
// table: "table",
// port: 0,
// user: "user",
// password: "password",
// replicas: [{
// host: "host",
// priority: 0,
// port: 0,
// user: "user",
// password: "password"
// }],
// where: "where",
// invalidateQuery: "invalidateQuery",
// closeConnection: {
// value: true
// },
// shareConnection: {
// value: true
// }
// },
// clickhouseSource: {
// db: "db",
// table: "table",
// host: "host",
// port: 0,
// user: "user",
// password: "password",
// where: "where",
// secure: {
// value: true
// }
// },
// mongodbSource: {
// db: "db",
// collection: "collection",
// host: "host",
// port: 0,
// user: "user",
// password: "password",
// options: "options"
// },
// postgresqlSource: {
// db: "db",
// table: "table",
// hosts: ["hosts"],
// port: 0,
// user: "user",
// password: "password",
// invalidateQuery: "invalidateQuery",
// sslMode: PostgresqlSource_SslMode.DISABLE
// }
// }],
// graphiteRollup: [{
// name: "name",
// patterns: [{
// regexp: "regexp",
// function: "function",
// retention: [{
// age: 0,
// precision: 0
// }]
// }],
// pathColumnName: "pathColumnName",
// timeColumnName: "timeColumnName",
// valueColumnName: "valueColumnName",
// versionColumnName: "versionColumnName"
// }],
// kafka: {
// securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
// saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_GSSAPI,
// saslUsername: "saslUsername",
// saslPassword: "saslPassword",
// enableSslCertificateVerification: {
// value: true
// },
// maxPollIntervalMs: {
// value: 0
// },
// sessionTimeoutMs: {
// value: 0
// },
// debug: Kafka_Debug.DEBUG_GENERIC,
// autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
// },
// kafkaTopics: [{
// name: "name",
// settings: {
// securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
// saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_GSSAPI,
// saslUsername: "saslUsername",
// saslPassword: "saslPassword",
// enableSslCertificateVerification: {
// value: true
// },
// maxPollIntervalMs: {
// value: 0
// },
// sessionTimeoutMs: {
// value: 0
// },
// debug: Kafka_Debug.DEBUG_GENERIC,
// autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
// }
// }],
// rabbitmq: {
// username: "username",
// password: "password",
// vhost: "vhost"
// },
// maxConnections: {
// value: 0
// },
// maxConcurrentQueries: {
// value: 0
// },
// keepAliveTimeout: {
// value: 0
// },
// uncompressedCacheSize: {
// value: 0
// },
// markCacheSize: {
// value: 0
// },
// maxTableSizeToDrop: {
// value: 0
// },
// maxPartitionSizeToDrop: {
// value: 0
// },
// builtinDictionariesReloadInterval: {
// value: 0
// },
// timezone: "timezone",
// geobaseEnabled: {
// value: true
// },
// geobaseUri: "geobaseUri",
// queryLogRetentionSize: {
// value: 0
// },
// queryLogRetentionTime: {
// value: 0
// },
// queryThreadLogEnabled: {
// value: true
// },
// queryThreadLogRetentionSize: {
// value: 0
// },
// queryThreadLogRetentionTime: {
// value: 0
// },
// partLogRetentionSize: {
// value: 0
// },
// partLogRetentionTime: {
// value: 0
// },
// metricLogEnabled: {
// value: true
// },
// metricLogRetentionSize: {
// value: 0
// },
// metricLogRetentionTime: {
// value: 0
// },
// traceLogEnabled: {
// value: true
// },
// traceLogRetentionSize: {
// value: 0
// },
// traceLogRetentionTime: {
// value: 0
// },
// textLogEnabled: {
// value: true
// },
// textLogRetentionSize: {
// value: 0
// },
// textLogRetentionTime: {
// value: 0
// },
// textLogLevel: ClickhouseConfig_LogLevel.TRACE,
// opentelemetrySpanLogEnabled: {
// value: true
// },
// opentelemetrySpanLogRetentionSize: {
// value: 0
// },
// opentelemetrySpanLogRetentionTime: {
// value: 0
// },
// queryViewsLogEnabled: {
// value: true
// },
// queryViewsLogRetentionSize: {
// value: 0
// },
// queryViewsLogRetentionTime: {
// value: 0
// },
// asynchronousMetricLogEnabled: {
// value: true
// },
// asynchronousMetricLogRetentionSize: {
// value: 0
// },
// asynchronousMetricLogRetentionTime: {
// value: 0
// },
// sessionLogEnabled: {
// value: true
// },
// sessionLogRetentionSize: {
// value: 0
// },
// sessionLogRetentionTime: {
// value: 0
// },
// zookeeperLogEnabled: {
// value: true
// },
// zookeeperLogRetentionSize: {
// value: 0
// },
// zookeeperLogRetentionTime: {
// value: 0
// },
// asynchronousInsertLogEnabled: {
// value: true
// },
// asynchronousInsertLogRetentionSize: {
// value: 0
// },
// asynchronousInsertLogRetentionTime: {
// value: 0
// },
// backgroundPoolSize: {
// value: 0
// },
// backgroundMergesMutationsConcurrencyRatio: {
// value: 0
// },
// backgroundSchedulePoolSize: {
// value: 0
// },
// backgroundFetchesPoolSize: {
// value: 0
// },
// backgroundMovePoolSize: {
// value: 0
// },
// backgroundDistributedSchedulePoolSize: {
// value: 0
// },
// backgroundBufferFlushSchedulePoolSize: {
// value: 0
// },
// backgroundMessageBrokerSchedulePoolSize: {
// value: 0
// },
// backgroundCommonPoolSize: {
// value: 0
// },
// defaultDatabase: {
// value: "value"
// },
// totalMemoryProfilerStep: {
// value: 0
// },
// totalMemoryTrackerSampleProbability: {
// value: 0
// },
// queryMaskingRules: [{
// name: "name",
// regexp: "regexp",
// replace: "replace"
// }],
// dictionariesLazyLoad: {
// value: true
// },
// queryCache: {
// maxSizeInBytes: {
// value: 0
// },
// maxEntries: {
// value: 0
// },
// maxEntrySizeInBytes: {
// value: 0
// },
// maxEntrySizeInRows: {
// value: 0
// }
// }
// },
// resources: {
// resourcePresetId: "resourcePresetId",
// diskSize: 0,
// diskTypeId: "diskTypeId"
// }
// },
// zookeeper: {
// resources: {
// resourcePresetId: "resourcePresetId",
// diskSize: 0,
// diskTypeId: "diskTypeId"
// }
// },
// backupWindowStart: {
// hours: 0,
// minutes: 0,
// seconds: 0,
// nanos: 0
// },
// access: {
// dataLens: true,
// webSql: true,
// metrika: true,
// serverless: true,
// dataTransfer: true,
// yandexQuery: true
// },
// cloudStorage: {
// enabled: true,
// moveFactor: {
// value: 0
// },
// dataCacheEnabled: {
// value: true
// },
// dataCacheMaxSize: {
// value: 0
// },
// preferNotToMerge: {
// value: true
// }
// },
// sqlDatabaseManagement: {
// value: true
// },
// sqlUserManagement: {
// value: true
// },
// adminPassword: "adminPassword",
// embeddedKeeper: {
// value: true
// },
// backupRetainPeriodDays: {
// value: 0
// }
},
// hostSpecs: [{
// zoneId: "zoneId",
// type: Host_Type.CLICKHOUSE,
// subnetId: "subnetId",
// assignPublicIp: true,
// shardName: "shardName"
// }],
networkId: "networkId",
// folderId: "folderId",
// serviceAccountId: "serviceAccountId",
// securityGroupIds: ["securityGroupIds"],
// deletionProtection: true
})
);
const finishedOp = await waitForOperation(operation, session);
if (finishedOp.response) {
const result = decodeMessage<typeof Cluster>(finishedOp.response);
console.log(result);
}
})();
- Python
import os
import grpc
import yandexcloud
from google.protobuf.wrappers_pb2 import BoolValue, DoubleValue, Int64Value, StringValue
from google.type.timeofday_pb2 import TimeOfDay
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Access
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import CloudStorage
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Cluster
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2_grpc import ClusterServiceStub
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import ConfigSpec
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import HostSpec
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Resources
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import RestoreClusterMetadata
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import RestoreClusterRequest
token = os.getenv('YC_OAUTH_TOKEN')
sdk = yandexcloud.SDK(token=token)
service = sdk.client(ClusterServiceStub)
operation = service.Restore(
RestoreClusterRequest(
backup_id = "backupId",
# additional_backup_ids = ["additionalBackupIds"],
name = "name",
# description = "description",
# labels = {"key": "labels"},
environment = Cluster.Environment.PRODUCTION,
config_spec = ConfigSpec(
# version = "version",
# clickhouse = ConfigSpec.Clickhouse(
# config = ClickhouseConfig(
# log_level = ClickhouseConfig.LogLevel.TRACE,
# merge_tree = ClickhouseConfig.MergeTree(
# replicated_deduplication_window = Int64Value(
# value = 0
# ),
# replicated_deduplication_window_seconds = Int64Value(
# value = 0
# ),
# parts_to_delay_insert = Int64Value(
# value = 0
# ),
# parts_to_throw_insert = Int64Value(
# value = 0
# ),
# inactive_parts_to_delay_insert = Int64Value(
# value = 0
# ),
# inactive_parts_to_throw_insert = Int64Value(
# value = 0
# ),
# max_replicated_merges_in_queue = Int64Value(
# value = 0
# ),
# number_of_free_entries_in_pool_to_lower_max_size_of_merge = Int64Value(
# value = 0
# ),
# max_bytes_to_merge_at_min_space_in_pool = Int64Value(
# value = 0
# ),
# max_bytes_to_merge_at_max_space_in_pool = Int64Value(
# value = 0
# ),
# min_bytes_for_wide_part = Int64Value(
# value = 0
# ),
# min_rows_for_wide_part = Int64Value(
# value = 0
# ),
# ttl_only_drop_parts = BoolValue(
# value = True
# ),
# allow_remote_fs_zero_copy_replication = BoolValue(
# value = True
# ),
# merge_with_ttl_timeout = Int64Value(
# value = 0
# ),
# merge_with_recompression_ttl_timeout = Int64Value(
# value = 0
# ),
# max_parts_in_total = Int64Value(
# value = 0
# ),
# max_number_of_merges_with_ttl_in_pool = Int64Value(
# value = 0
# ),
# cleanup_delay_period = Int64Value(
# value = 0
# ),
# number_of_free_entries_in_pool_to_execute_mutation = Int64Value(
# value = 0
# ),
# max_avg_part_size_for_too_many_parts = Int64Value(
# value = 0
# ),
# min_age_to_force_merge_seconds = Int64Value(
# value = 0
# ),
# min_age_to_force_merge_on_partition_only = BoolValue(
# value = True
# ),
# merge_selecting_sleep_ms = Int64Value(
# value = 0
# ),
# merge_max_block_size = Int64Value(
# value = 0
# ),
# check_sample_column_is_correct = BoolValue(
# value = True
# ),
# max_merge_selecting_sleep_ms = Int64Value(
# value = 0
# ),
# max_cleanup_delay_period = Int64Value(
# value = 0
# )
# ),
# compression = [ClickhouseConfig.Compression(
# method = Compression.Method.LZ4,
# min_part_size = 0,
# min_part_size_ratio = 0,
# level = Int64Value(
# value = 0
# )
# )],
# dictionaries = [ClickhouseConfig.ExternalDictionary(
# name = "name",
# structure = ExternalDictionary.Structure(
# id = Structure.Id(
# name = "name"
# ),
# key = Structure.Key(
# attributes = [Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# )]
# ),
# range_min = Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# ),
# range_max = Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# ),
# attributes = [Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# )]
# ),
# layout = ExternalDictionary.Layout(
# type = Layout.Type.FLAT,
# size_in_cells = 0,
# max_array_size = 0
# ),
# fixed_lifetime = 0,
# lifetime_range = ExternalDictionary.Range(
# min = 0,
# max = 0
# ),
# http_source = ExternalDictionary.HttpSource(
# url = "url",
# format = "format",
# headers = [HttpSource.Header(
# name = "name",
# value = "value"
# )]
# ),
# mysql_source = ExternalDictionary.MysqlSource(
# db = "db",
# table = "table",
# port = 0,
# user = "user",
# password = "password",
# replicas = [MysqlSource.Replica(
# host = "host",
# priority = 0,
# port = 0,
# user = "user",
# password = "password"
# )],
# where = "where",
# invalidate_query = "invalidateQuery",
# close_connection = BoolValue(
# value = True
# ),
# share_connection = BoolValue(
# value = True
# )
# ),
# clickhouse_source = ExternalDictionary.ClickhouseSource(
# db = "db",
# table = "table",
# host = "host",
# port = 0,
# user = "user",
# password = "password",
# where = "where",
# secure = BoolValue(
# value = True
# )
# ),
# mongodb_source = ExternalDictionary.MongodbSource(
# db = "db",
# collection = "collection",
# host = "host",
# port = 0,
# user = "user",
# password = "password",
# options = "options"
# ),
# postgresql_source = ExternalDictionary.PostgresqlSource(
# db = "db",
# table = "table",
# hosts = ["hosts"],
# port = 0,
# user = "user",
# password = "password",
# invalidate_query = "invalidateQuery",
# ssl_mode = PostgresqlSource.SslMode.DISABLE
# )
# )],
# graphite_rollup = [ClickhouseConfig.GraphiteRollup(
# name = "name",
# patterns = [GraphiteRollup.Pattern(
# regexp = "regexp",
# function = "function",
# retention = [Pattern.Retention(
# age = 0,
# precision = 0
# )]
# )],
# path_column_name = "pathColumnName",
# time_column_name = "timeColumnName",
# value_column_name = "valueColumnName",
# version_column_name = "versionColumnName"
# )],
# kafka = ClickhouseConfig.Kafka(
# security_protocol = Kafka.SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
# sasl_mechanism = Kafka.SaslMechanism.SASL_MECHANISM_GSSAPI,
# sasl_username = "saslUsername",
# sasl_password = "saslPassword",
# enable_ssl_certificate_verification = BoolValue(
# value = True
# ),
# max_poll_interval_ms = Int64Value(
# value = 0
# ),
# session_timeout_ms = Int64Value(
# value = 0
# ),
# debug = Kafka.Debug.DEBUG_GENERIC,
# auto_offset_reset = Kafka.AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
# ),
# kafka_topics = [ClickhouseConfig.KafkaTopic(
# name = "name",
# settings = ClickhouseConfig.Kafka(
# security_protocol = Kafka.SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
# sasl_mechanism = Kafka.SaslMechanism.SASL_MECHANISM_GSSAPI,
# sasl_username = "saslUsername",
# sasl_password = "saslPassword",
# enable_ssl_certificate_verification = BoolValue(
# value = True
# ),
# max_poll_interval_ms = Int64Value(
# value = 0
# ),
# session_timeout_ms = Int64Value(
# value = 0
# ),
# debug = Kafka.Debug.DEBUG_GENERIC,
# auto_offset_reset = Kafka.AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
# )
# )],
# rabbitmq = ClickhouseConfig.Rabbitmq(
# username = "username",
# password = "password",
# vhost = "vhost"
# ),
# max_connections = Int64Value(
# value = 0
# ),
# max_concurrent_queries = Int64Value(
# value = 0
# ),
# keep_alive_timeout = Int64Value(
# value = 0
# ),
# uncompressed_cache_size = Int64Value(
# value = 0
# ),
# mark_cache_size = Int64Value(
# value = 0
# ),
# max_table_size_to_drop = Int64Value(
# value = 0
# ),
# max_partition_size_to_drop = Int64Value(
# value = 0
# ),
# builtin_dictionaries_reload_interval = Int64Value(
# value = 0
# ),
# timezone = "timezone",
# geobase_enabled = BoolValue(
# value = True
# ),
# geobase_uri = "geobaseUri",
# query_log_retention_size = Int64Value(
# value = 0
# ),
# query_log_retention_time = Int64Value(
# value = 0
# ),
# query_thread_log_enabled = BoolValue(
# value = True
# ),
# query_thread_log_retention_size = Int64Value(
# value = 0
# ),
# query_thread_log_retention_time = Int64Value(
# value = 0
# ),
# part_log_retention_size = Int64Value(
# value = 0
# ),
# part_log_retention_time = Int64Value(
# value = 0
# ),
# metric_log_enabled = BoolValue(
# value = True
# ),
# metric_log_retention_size = Int64Value(
# value = 0
# ),
# metric_log_retention_time = Int64Value(
# value = 0
# ),
# trace_log_enabled = BoolValue(
# value = True
# ),
# trace_log_retention_size = Int64Value(
# value = 0
# ),
# trace_log_retention_time = Int64Value(
# value = 0
# ),
# text_log_enabled = BoolValue(
# value = True
# ),
# text_log_retention_size = Int64Value(
# value = 0
# ),
# text_log_retention_time = Int64Value(
# value = 0
# ),
# text_log_level = ClickhouseConfig.LogLevel.TRACE,
# opentelemetry_span_log_enabled = BoolValue(
# value = True
# ),
# opentelemetry_span_log_retention_size = Int64Value(
# value = 0
# ),
# opentelemetry_span_log_retention_time = Int64Value(
# value = 0
# ),
# query_views_log_enabled = BoolValue(
# value = True
# ),
# query_views_log_retention_size = Int64Value(
# value = 0
# ),
# query_views_log_retention_time = Int64Value(
# value = 0
# ),
# asynchronous_metric_log_enabled = BoolValue(
# value = True
# ),
# asynchronous_metric_log_retention_size = Int64Value(
# value = 0
# ),
# asynchronous_metric_log_retention_time = Int64Value(
# value = 0
# ),
# session_log_enabled = BoolValue(
# value = True
# ),
# session_log_retention_size = Int64Value(
# value = 0
# ),
# session_log_retention_time = Int64Value(
# value = 0
# ),
# zookeeper_log_enabled = BoolValue(
# value = True
# ),
# zookeeper_log_retention_size = Int64Value(
# value = 0
# ),
# zookeeper_log_retention_time = Int64Value(
# value = 0
# ),
# asynchronous_insert_log_enabled = BoolValue(
# value = True
# ),
# asynchronous_insert_log_retention_size = Int64Value(
# value = 0
# ),
# asynchronous_insert_log_retention_time = Int64Value(
# value = 0
# ),
# background_pool_size = Int64Value(
# value = 0
# ),
# background_merges_mutations_concurrency_ratio = Int64Value(
# value = 0
# ),
# background_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_fetches_pool_size = Int64Value(
# value = 0
# ),
# background_move_pool_size = Int64Value(
# value = 0
# ),
# background_distributed_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_buffer_flush_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_message_broker_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_common_pool_size = Int64Value(
# value = 0
# ),
# default_database = StringValue(
# value = "value"
# ),
# total_memory_profiler_step = Int64Value(
# value = 0
# ),
# total_memory_tracker_sample_probability = DoubleValue(
# value = 0
# ),
# query_masking_rules = [ClickhouseConfig.QueryMaskingRule(
# name = "name",
# regexp = "regexp",
# replace = "replace"
# )],
# dictionaries_lazy_load = BoolValue(
# value = True
# ),
# query_cache = ClickhouseConfig.QueryCache(
# max_size_in_bytes = Int64Value(
# value = 0
# ),
# max_entries = Int64Value(
# value = 0
# ),
# max_entry_size_in_bytes = Int64Value(
# value = 0
# ),
# max_entry_size_in_rows = Int64Value(
# value = 0
# )
# )
# ),
# resources = Resources(
# resource_preset_id = "resourcePresetId",
# disk_size = 0,
# disk_type_id = "diskTypeId"
# )
# ),
# zookeeper = ConfigSpec.Zookeeper(
# resources = Resources(
# resource_preset_id = "resourcePresetId",
# disk_size = 0,
# disk_type_id = "diskTypeId"
# )
# ),
# backup_window_start = TimeOfDay(
# hours = 0,
# minutes = 0,
# seconds = 0,
# nanos = 0
# ),
# access = Access(
# data_lens = True,
# web_sql = True,
# metrika = True,
# serverless = True,
# data_transfer = True,
# yandex_query = True
# ),
# cloud_storage = CloudStorage(
# enabled = True,
# move_factor = DoubleValue(
# value = 0
# ),
# data_cache_enabled = BoolValue(
# value = True
# ),
# data_cache_max_size = Int64Value(
# value = 0
# ),
# prefer_not_to_merge = BoolValue(
# value = True
# )
# ),
# sql_database_management = BoolValue(
# value = True
# ),
# sql_user_management = BoolValue(
# value = True
# ),
# admin_password = "adminPassword",
# embedded_keeper = BoolValue(
# value = True
# ),
# backup_retain_period_days = Int64Value(
# value = 0
# )
),
# host_specs = [HostSpec(
# zone_id = "zoneId",
# type = Host.Type.CLICKHOUSE,
# subnet_id = "subnetId",
# assign_public_ip = True,
# shard_name = "shardName"
# )],
network_id = "networkId",
# folder_id = "folderId",
# service_account_id = "serviceAccountId",
# security_group_ids = ["securityGroupIds"],
# deletion_protection = true
)
)
operation_result = sdk.wait_operation_and_get_result(
operation,
response_type=Cluster,
meta_type=RestoreClusterMetadata,
)
print(operation_result)
RestoreClusterRequest
backupId
: string
ID of the backup to create a cluster from. To get the backup ID, use a ClusterService.ListBackups request.
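As a sketch of how such an ID might be obtained with the Python SDK used above (the mysql module path and request/field names are assumed from the service's proto layout, not shown on this page):
import os
import yandexcloud
from yandex.cloud.mdb.mysql.v1.cluster_service_pb2 import ListClusterBackupsRequest
from yandex.cloud.mdb.mysql.v1.cluster_service_pb2_grpc import ClusterServiceStub

sdk = yandexcloud.SDK(token=os.getenv("YC_OAUTH_TOKEN"))
service = sdk.client(ClusterServiceStub)
# List backups of an existing cluster; each entry's `id` can be passed as backup_id.
response = service.ListBackups(ListClusterBackupsRequest(cluster_id="clusterId"))
for backup in response.backups:
    print(backup.id, backup.created_at)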
time
: google.protobuf.Timestamp
Timestamp of the moment to which the MySQL cluster should be restored.
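A point-in-time target can be built with the protobuf Timestamp helper, for example (the timestamp value is illustrative):
from google.protobuf.timestamp_pb2 import Timestamp

restore_time = Timestamp()
restore_time.FromJsonString("2024-05-01T00:00:00Z")  # moment to restore to (illustrative)
# Then pass it in the request: RestoreClusterRequest(backup_id=..., time=restore_time, ...)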
name
: string
Name of the new MySQL cluster. The name must be unique within the folder.
description
: string
Description of the new MySQL cluster.
labels
: string
Custom labels for the MySQL cluster as key:value pairs. Maximum 64 per resource. For example, "project": "mvp" or "source": "dictionary".
environment
: Cluster.Environment
Deployment environment of the new MySQL cluster.
configSpec
: ConfigSpec
Configuration for the MySQL cluster to be created.
hostSpecs
: HostSpec
Configurations for MySQL hosts that should be added to the cluster that is being created from the backup.
networkId
: string
ID of the network to create the MySQL cluster in.
ConfigSpec
version
: string
Version of MySQL used in the cluster. Possible values:
- 5.7
One of mysqlConfig
Configuration of a MySQL cluster.
mysqlConfig_5_7
: config.MysqlConfig5_7
Configuration for a MySQL 5.7 cluster.
resources
: Resources
Resources allocated to MySQL hosts.
backupWindowStart
: google.type.TimeOfDay
Time to start the daily backup, in the UTC timezone.
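Putting these fields together, a minimal ConfigSpec for the restored cluster might look as follows; this is a sketch, with the mysql5_7 module path assumed from the proto layout and all values illustrative:
from google.protobuf.wrappers_pb2 import Int64Value
from google.type.timeofday_pb2 import TimeOfDay
from yandex.cloud.mdb.mysql.v1.cluster_pb2 import Resources
from yandex.cloud.mdb.mysql.v1.cluster_service_pb2 import ConfigSpec
from yandex.cloud.mdb.mysql.v1.config.mysql5_7_pb2 import MysqlConfig5_7

config_spec = ConfigSpec(
    version="5.7",
    mysql_config_5_7=MysqlConfig5_7(
        max_connections=Int64Value(value=100),
    ),
    resources=Resources(
        resource_preset_id="resourcePresetId",  # see the documentation for available presets
        disk_size=10 * 2**30,                   # 10 GiB, in bytes
        disk_type_id="network-ssd",
    ),
    backup_window_start=TimeOfDay(hours=1, minutes=30),  # daily backup at 01:30 UTC
)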
HostSpec
zoneId
: string
ID of the availability zone where the host resides. To get a list of available zones, use the yandex.cloud.compute.v1.ZoneService.List request.
subnetId
: string
ID of the subnet that the host should belong to. This subnet should be a part of the network that the cluster belongs to. The ID of the network is set in the field Cluster.network_id.
assignPublicIp
: bool
Whether the host should get a public IP address on creation.
After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign a public IP to a host without one, recreate the host with assign_public_ip set as needed.
Possible values:
- false - don't assign a public IP to the host.
- true - the host should have a public IP address.
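For example, a host specification for the restored cluster could be sketched like this (zone and subnet IDs are placeholders):
from yandex.cloud.mdb.mysql.v1.cluster_service_pb2 import HostSpec

host_specs = [
    HostSpec(
        zone_id="ru-central1-a",   # availability zone of the host
        subnet_id="subnetId",      # subnet in the cluster's network
        assign_public_ip=False,    # cannot be changed after the host is created
    ),
]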
MysqlConfig5_7
The options and structure of MysqlConfig5_7 reflect the MySQL 5.7 configuration file.
innodbBufferPoolSize
: google.protobuf.Int64Value
Size of the InnoDB buffer pool used for caching table and index data.
For details, see MySQL documentation for the parameter.
maxConnections
: google.protobuf.Int64Value
The maximum permitted number of simultaneous client connections.
For details, see MySQL documentation for the variable.
longQueryTime
: google.protobuf.DoubleValue
Time that it takes to process a query before it is considered slow.
For details, see MySQL documentation for the variable.
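These settings use protobuf wrapper types (Int64Value, DoubleValue) so the API can tell an explicitly set value apart from an unset field that should keep the server default. A sketch with illustrative values (module path assumed as above):
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
from yandex.cloud.mdb.mysql.v1.config.mysql5_7_pb2 import MysqlConfig5_7

mysql_config = MysqlConfig5_7(
    innodb_buffer_pool_size=Int64Value(value=2 * 2**30),  # 2 GiB, in bytes
    max_connections=Int64Value(value=200),
    long_query_time=DoubleValue(value=1.5),               # seconds
)
# Wrapper fields left out stay unset, so the service keeps its defaults.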
Resources
resourcePresetId
: string
ID of the preset for computational resources available to a host (CPU, memory etc.). All available presets are listed in the documentation.
diskSize
: int64
Volume of the storage available to a host, in bytes.
diskTypeId
: string
Type of the storage environment for the host. Possible values:
- network-ssd - network SSD drive,
- local-ssd - local SSD storage.
Operation
An Operation resource. For more information, see Operation.
id
: string
ID of the operation.
description
: string
Description of the operation. 0-256 characters long.
createdAt
: google.protobuf.Timestamp
Creation timestamp.
createdBy
: string
ID of the user or service account who initiated the operation.
modifiedAt
: google.protobuf.Timestamp
The time when the Operation resource was last modified.
done
: bool
If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
metadata
: google.protobuf.Any
Service-specific metadata associated with the operation. It typically contains the ID of the target resource that the operation is performed on. Any method that returns a long-running operation should document the metadata type, if any.
One of result
The operation result.
If done == false and there was no failure detected, neither error nor response is set. If done == false and there was a failure detected, error is set. If done == true, exactly one of error or response is set.
error
: google.rpc.Status
The error result of the operation in case of failure or cancellation.
response
: google.protobuf.Any
The normal response of the operation in case of success.
If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is the standard Create/Update, the response should be the target resource of the operation. Any method that returns a long-running operation should document the response type, if any.
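The SDK helpers used in the examples above (waitForOperation, wait_operation_and_get_result) poll and unpack the operation for you; the checks they perform can be sketched manually like this (a simplified illustration, not the SDK's actual implementation):
from yandex.cloud.operation.operation_pb2 import Operation

def unpack_response(op: Operation, message_cls):
    if not op.done:
        raise RuntimeError(f"operation {op.id} is still in progress")
    if op.HasField("error"):
        # op.error is a google.rpc.Status with code, message, details
        raise RuntimeError(f"operation failed: {op.error.message}")
    msg = message_cls()
    op.response.Unpack(msg)  # op.response is a google.protobuf.Any
    return msg

# For this method, message_cls would be the Cluster message documented above.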