Restore
Creates a new Redis cluster using the specified backup.
- TypeScript
- Python
import {
cloudApi,
decodeMessage,
serviceClients,
Session,
waitForOperation,
} from "@yandex-cloud/nodejs-sdk";
const ClickhouseConfig_LogLevel =
cloudApi.mdb.clickhouse_config_clickhouse.ClickhouseConfig_LogLevel;
const Cluster = cloudApi.mdb.clickhouse_cluster.Cluster;
const Cluster_Environment = cloudApi.mdb.clickhouse_cluster.Cluster_Environment;
const Compression_Method =
cloudApi.mdb.clickhouse_config_clickhouse.Compression_Method;
const Host_Type = cloudApi.mdb.clickhouse_cluster.Host_Type;
const Kafka_AutoOffsetReset =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_AutoOffsetReset;
const Kafka_Debug = cloudApi.mdb.clickhouse_config_clickhouse.Kafka_Debug;
const Kafka_SaslMechanism =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_SaslMechanism;
const Kafka_SecurityProtocol =
cloudApi.mdb.clickhouse_config_clickhouse.Kafka_SecurityProtocol;
const Layout_Type = cloudApi.mdb.clickhouse_config_clickhouse.Layout_Type;
const PostgresqlSource_SslMode =
cloudApi.mdb.clickhouse_config_clickhouse.PostgresqlSource_SslMode;
const RestoreClusterRequest =
cloudApi.mdb.clickhouse_cluster_service.RestoreClusterRequest;
(async () => {
const authToken = process.env["YC_OAUTH_TOKEN"];
const session = new Session({ oauthToken: authToken });
const client = session.client(serviceClients.ClusterServiceClient);
const operation = await client.restore(
RestoreClusterRequest.fromPartial({
backupId: "backupId",
// additionalBackupIds: ["additionalBackupIds"],
name: "name",
// description: "description",
// labels: {"key": "labels"},
environment: Cluster_Environment.PRODUCTION,
configSpec: {
// version: "version",
// clickhouse: {
// config: {
// logLevel: ClickhouseConfig_LogLevel.TRACE,
// mergeTree: {
// replicatedDeduplicationWindow: {
// value: 0
// },
// replicatedDeduplicationWindowSeconds: {
// value: 0
// },
// partsToDelayInsert: {
// value: 0
// },
// partsToThrowInsert: {
// value: 0
// },
// inactivePartsToDelayInsert: {
// value: 0
// },
// inactivePartsToThrowInsert: {
// value: 0
// },
// maxReplicatedMergesInQueue: {
// value: 0
// },
// numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: {
// value: 0
// },
// maxBytesToMergeAtMinSpaceInPool: {
// value: 0
// },
// maxBytesToMergeAtMaxSpaceInPool: {
// value: 0
// },
// minBytesForWidePart: {
// value: 0
// },
// minRowsForWidePart: {
// value: 0
// },
// ttlOnlyDropParts: {
// value: true
// },
// allowRemoteFsZeroCopyReplication: {
// value: true
// },
// mergeWithTtlTimeout: {
// value: 0
// },
// mergeWithRecompressionTtlTimeout: {
// value: 0
// },
// maxPartsInTotal: {
// value: 0
// },
// maxNumberOfMergesWithTtlInPool: {
// value: 0
// },
// cleanupDelayPeriod: {
// value: 0
// },
// numberOfFreeEntriesInPoolToExecuteMutation: {
// value: 0
// },
// maxAvgPartSizeForTooManyParts: {
// value: 0
// },
// minAgeToForceMergeSeconds: {
// value: 0
// },
// minAgeToForceMergeOnPartitionOnly: {
// value: true
// },
// mergeSelectingSleepMs: {
// value: 0
// },
// mergeMaxBlockSize: {
// value: 0
// },
// checkSampleColumnIsCorrect: {
// value: true
// },
// maxMergeSelectingSleepMs: {
// value: 0
// },
// maxCleanupDelayPeriod: {
// value: 0
// }
// },
// compression: [{
// method: Compression_Method.LZ4,
// minPartSize: 0,
// minPartSizeRatio: 0,
// level: {
// value: 0
// }
// }],
// dictionaries: [{
// name: "name",
// structure: {
// id: {
// name: "name"
// },
// key: {
// attributes: [{
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// }]
// },
// rangeMin: {
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// },
// rangeMax: {
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// },
// attributes: [{
// name: "name",
// type: "type",
// nullValue: "nullValue",
// expression: "expression",
// hierarchical: true,
// injective: true
// }]
// },
// layout: {
// type: Layout_Type.FLAT,
// sizeInCells: 0,
// maxArraySize: 0
// },
// fixedLifetime: 0,
// lifetimeRange: {
// min: 0,
// max: 0
// },
// httpSource: {
// url: "url",
// format: "format",
// headers: [{
// name: "name",
// value: "value"
// }]
// },
// mysqlSource: {
// db: "db",
// table: "table",
// port: 0,
// user: "user",
// password: "password",
// replicas: [{
// host: "host",
// priority: 0,
// port: 0,
// user: "user",
// password: "password"
// }],
// where: "where",
// invalidateQuery: "invalidateQuery",
// closeConnection: {
// value: true
// },
// shareConnection: {
// value: true
// }
// },
// clickhouseSource: {
// db: "db",
// table: "table",
// host: "host",
// port: 0,
// user: "user",
// password: "password",
// where: "where",
// secure: {
// value: true
// }
// },
// mongodbSource: {
// db: "db",
// collection: "collection",
// host: "host",
// port: 0,
// user: "user",
// password: "password",
// options: "options"
// },
// postgresqlSource: {
// db: "db",
// table: "table",
// hosts: ["hosts"],
// port: 0,
// user: "user",
// password: "password",
// invalidateQuery: "invalidateQuery",
// sslMode: PostgresqlSource_SslMode.DISABLE
// }
// }],
// graphiteRollup: [{
// name: "name",
// patterns: [{
// regexp: "regexp",
// function: "function",
// retention: [{
// age: 0,
// precision: 0
// }]
// }],
// pathColumnName: "pathColumnName",
// timeColumnName: "timeColumnName",
// valueColumnName: "valueColumnName",
// versionColumnName: "versionColumnName"
// }],
// kafka: {
// securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
// saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_GSSAPI,
// saslUsername: "saslUsername",
// saslPassword: "saslPassword",
// enableSslCertificateVerification: {
// value: true
// },
// maxPollIntervalMs: {
// value: 0
// },
// sessionTimeoutMs: {
// value: 0
// },
// debug: Kafka_Debug.DEBUG_GENERIC,
// autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
// },
// kafkaTopics: [{
// name: "name",
// settings: {
// securityProtocol: Kafka_SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
// saslMechanism: Kafka_SaslMechanism.SASL_MECHANISM_GSSAPI,
// saslUsername: "saslUsername",
// saslPassword: "saslPassword",
// enableSslCertificateVerification: {
// value: true
// },
// maxPollIntervalMs: {
// value: 0
// },
// sessionTimeoutMs: {
// value: 0
// },
// debug: Kafka_Debug.DEBUG_GENERIC,
// autoOffsetReset: Kafka_AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
// }
// }],
// rabbitmq: {
// username: "username",
// password: "password",
// vhost: "vhost"
// },
// maxConnections: {
// value: 0
// },
// maxConcurrentQueries: {
// value: 0
// },
// keepAliveTimeout: {
// value: 0
// },
// uncompressedCacheSize: {
// value: 0
// },
// markCacheSize: {
// value: 0
// },
// maxTableSizeToDrop: {
// value: 0
// },
// maxPartitionSizeToDrop: {
// value: 0
// },
// builtinDictionariesReloadInterval: {
// value: 0
// },
// timezone: "timezone",
// geobaseEnabled: {
// value: true
// },
// geobaseUri: "geobaseUri",
// queryLogRetentionSize: {
// value: 0
// },
// queryLogRetentionTime: {
// value: 0
// },
// queryThreadLogEnabled: {
// value: true
// },
// queryThreadLogRetentionSize: {
// value: 0
// },
// queryThreadLogRetentionTime: {
// value: 0
// },
// partLogRetentionSize: {
// value: 0
// },
// partLogRetentionTime: {
// value: 0
// },
// metricLogEnabled: {
// value: true
// },
// metricLogRetentionSize: {
// value: 0
// },
// metricLogRetentionTime: {
// value: 0
// },
// traceLogEnabled: {
// value: true
// },
// traceLogRetentionSize: {
// value: 0
// },
// traceLogRetentionTime: {
// value: 0
// },
// textLogEnabled: {
// value: true
// },
// textLogRetentionSize: {
// value: 0
// },
// textLogRetentionTime: {
// value: 0
// },
// textLogLevel: ClickhouseConfig_LogLevel.TRACE,
// opentelemetrySpanLogEnabled: {
// value: true
// },
// opentelemetrySpanLogRetentionSize: {
// value: 0
// },
// opentelemetrySpanLogRetentionTime: {
// value: 0
// },
// queryViewsLogEnabled: {
// value: true
// },
// queryViewsLogRetentionSize: {
// value: 0
// },
// queryViewsLogRetentionTime: {
// value: 0
// },
// asynchronousMetricLogEnabled: {
// value: true
// },
// asynchronousMetricLogRetentionSize: {
// value: 0
// },
// asynchronousMetricLogRetentionTime: {
// value: 0
// },
// sessionLogEnabled: {
// value: true
// },
// sessionLogRetentionSize: {
// value: 0
// },
// sessionLogRetentionTime: {
// value: 0
// },
// zookeeperLogEnabled: {
// value: true
// },
// zookeeperLogRetentionSize: {
// value: 0
// },
// zookeeperLogRetentionTime: {
// value: 0
// },
// asynchronousInsertLogEnabled: {
// value: true
// },
// asynchronousInsertLogRetentionSize: {
// value: 0
// },
// asynchronousInsertLogRetentionTime: {
// value: 0
// },
// backgroundPoolSize: {
// value: 0
// },
// backgroundMergesMutationsConcurrencyRatio: {
// value: 0
// },
// backgroundSchedulePoolSize: {
// value: 0
// },
// backgroundFetchesPoolSize: {
// value: 0
// },
// backgroundMovePoolSize: {
// value: 0
// },
// backgroundDistributedSchedulePoolSize: {
// value: 0
// },
// backgroundBufferFlushSchedulePoolSize: {
// value: 0
// },
// backgroundMessageBrokerSchedulePoolSize: {
// value: 0
// },
// backgroundCommonPoolSize: {
// value: 0
// },
// defaultDatabase: {
// value: "value"
// },
// totalMemoryProfilerStep: {
// value: 0
// },
// totalMemoryTrackerSampleProbability: {
// value: 0
// },
// queryMaskingRules: [{
// name: "name",
// regexp: "regexp",
// replace: "replace"
// }],
// dictionariesLazyLoad: {
// value: true
// },
// queryCache: {
// maxSizeInBytes: {
// value: 0
// },
// maxEntries: {
// value: 0
// },
// maxEntrySizeInBytes: {
// value: 0
// },
// maxEntrySizeInRows: {
// value: 0
// }
// }
// },
// resources: {
// resourcePresetId: "resourcePresetId",
// diskSize: 0,
// diskTypeId: "diskTypeId"
// }
// },
// zookeeper: {
// resources: {
// resourcePresetId: "resourcePresetId",
// diskSize: 0,
// diskTypeId: "diskTypeId"
// }
// },
// backupWindowStart: {
// hours: 0,
// minutes: 0,
// seconds: 0,
// nanos: 0
// },
// access: {
// dataLens: true,
// webSql: true,
// metrika: true,
// serverless: true,
// dataTransfer: true,
// yandexQuery: true
// },
// cloudStorage: {
// enabled: true,
// moveFactor: {
// value: 0
// },
// dataCacheEnabled: {
// value: true
// },
// dataCacheMaxSize: {
// value: 0
// },
// preferNotToMerge: {
// value: true
// }
// },
// sqlDatabaseManagement: {
// value: true
// },
// sqlUserManagement: {
// value: true
// },
// adminPassword: "adminPassword",
// embeddedKeeper: {
// value: true
// },
// backupRetainPeriodDays: {
// value: 0
// }
},
// hostSpecs: [{
// zoneId: "zoneId",
// type: Host_Type.CLICKHOUSE,
// subnetId: "subnetId",
// assignPublicIp: true,
// shardName: "shardName"
// }],
networkId: "networkId",
// folderId: "folderId",
// serviceAccountId: "serviceAccountId",
// securityGroupIds: ["securityGroupIds"],
// deletionProtection: true
})
);
const finishedOp = await waitForOperation(operation, session);
if (finishedOp.response) {
const result = decodeMessage<typeof Cluster>(finishedOp.response);
console.log(result);
}
})();
import os
import grpc
import yandexcloud
from google.protobuf.wrappers_pb2 import BoolValue, DoubleValue, Int64Value, StringValue
from google.type.timeofday_pb2 import TimeOfDay
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Access
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import CloudStorage
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Cluster
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2_grpc import ClusterServiceStub
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import ConfigSpec
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import HostSpec
from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Resources
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import RestoreClusterMetadata
from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import RestoreClusterRequest
token = os.getenv('YC_OAUTH_TOKEN')
sdk = yandexcloud.SDK(token=token)
service = sdk.client(ClusterServiceStub)
operation = service.Restore(
RestoreClusterRequest(
backup_id = "backupId",
# additional_backup_ids = ["additionalBackupIds"],
name = "name",
# description = "description",
# labels = {"key": "labels"},
environment = Cluster.Environment.PRODUCTION,
config_spec = ConfigSpec(
# version = "version",
# clickhouse = ConfigSpec.Clickhouse(
# config = ClickhouseConfig(
# log_level = ClickhouseConfig.LogLevel.TRACE,
# merge_tree = ClickhouseConfig.MergeTree(
# replicated_deduplication_window = Int64Value(
# value = 0
# ),
# replicated_deduplication_window_seconds = Int64Value(
# value = 0
# ),
# parts_to_delay_insert = Int64Value(
# value = 0
# ),
# parts_to_throw_insert = Int64Value(
# value = 0
# ),
# inactive_parts_to_delay_insert = Int64Value(
# value = 0
# ),
# inactive_parts_to_throw_insert = Int64Value(
# value = 0
# ),
# max_replicated_merges_in_queue = Int64Value(
# value = 0
# ),
# number_of_free_entries_in_pool_to_lower_max_size_of_merge = Int64Value(
# value = 0
# ),
# max_bytes_to_merge_at_min_space_in_pool = Int64Value(
# value = 0
# ),
# max_bytes_to_merge_at_max_space_in_pool = Int64Value(
# value = 0
# ),
# min_bytes_for_wide_part = Int64Value(
# value = 0
# ),
# min_rows_for_wide_part = Int64Value(
# value = 0
# ),
# ttl_only_drop_parts = BoolValue(
# value = True
# ),
# allow_remote_fs_zero_copy_replication = BoolValue(
# value = True
# ),
# merge_with_ttl_timeout = Int64Value(
# value = 0
# ),
# merge_with_recompression_ttl_timeout = Int64Value(
# value = 0
# ),
# max_parts_in_total = Int64Value(
# value = 0
# ),
# max_number_of_merges_with_ttl_in_pool = Int64Value(
# value = 0
# ),
# cleanup_delay_period = Int64Value(
# value = 0
# ),
# number_of_free_entries_in_pool_to_execute_mutation = Int64Value(
# value = 0
# ),
# max_avg_part_size_for_too_many_parts = Int64Value(
# value = 0
# ),
# min_age_to_force_merge_seconds = Int64Value(
# value = 0
# ),
# min_age_to_force_merge_on_partition_only = BoolValue(
# value = True
# ),
# merge_selecting_sleep_ms = Int64Value(
# value = 0
# ),
# merge_max_block_size = Int64Value(
# value = 0
# ),
# check_sample_column_is_correct = BoolValue(
# value = True
# ),
# max_merge_selecting_sleep_ms = Int64Value(
# value = 0
# ),
# max_cleanup_delay_period = Int64Value(
# value = 0
# )
# ),
# compression = [ClickhouseConfig.Compression(
# method = Compression.Method.LZ4,
# min_part_size = 0,
# min_part_size_ratio = 0,
# level = Int64Value(
# value = 0
# )
# )],
# dictionaries = [ClickhouseConfig.ExternalDictionary(
# name = "name",
# structure = ExternalDictionary.Structure(
# id = Structure.Id(
# name = "name"
# ),
# key = Structure.Key(
# attributes = [Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# )]
# ),
# range_min = Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# ),
# range_max = Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# ),
# attributes = [Structure.Attribute(
# name = "name",
# type = "type",
# null_value = "nullValue",
# expression = "expression",
# hierarchical = True,
# injective = True
# )]
# ),
# layout = ExternalDictionary.Layout(
# type = Layout.Type.FLAT,
# size_in_cells = 0,
# max_array_size = 0
# ),
# fixed_lifetime = 0,
# lifetime_range = ExternalDictionary.Range(
# min = 0,
# max = 0
# ),
# http_source = ExternalDictionary.HttpSource(
# url = "url",
# format = "format",
# headers = [HttpSource.Header(
# name = "name",
# value = "value"
# )]
# ),
# mysql_source = ExternalDictionary.MysqlSource(
# db = "db",
# table = "table",
# port = 0,
# user = "user",
# password = "password",
# replicas = [MysqlSource.Replica(
# host = "host",
# priority = 0,
# port = 0,
# user = "user",
# password = "password"
# )],
# where = "where",
# invalidate_query = "invalidateQuery",
# close_connection = BoolValue(
# value = True
# ),
# share_connection = BoolValue(
# value = True
# )
# ),
# clickhouse_source = ExternalDictionary.ClickhouseSource(
# db = "db",
# table = "table",
# host = "host",
# port = 0,
# user = "user",
# password = "password",
# where = "where",
# secure = BoolValue(
# value = True
# )
# ),
# mongodb_source = ExternalDictionary.MongodbSource(
# db = "db",
# collection = "collection",
# host = "host",
# port = 0,
# user = "user",
# password = "password",
# options = "options"
# ),
# postgresql_source = ExternalDictionary.PostgresqlSource(
# db = "db",
# table = "table",
# hosts = ["hosts"],
# port = 0,
# user = "user",
# password = "password",
# invalidate_query = "invalidateQuery",
# ssl_mode = PostgresqlSource.SslMode.DISABLE
# )
# )],
# graphite_rollup = [ClickhouseConfig.GraphiteRollup(
# name = "name",
# patterns = [GraphiteRollup.Pattern(
# regexp = "regexp",
# function = "function",
# retention = [Pattern.Retention(
# age = 0,
# precision = 0
# )]
# )],
# path_column_name = "pathColumnName",
# time_column_name = "timeColumnName",
# value_column_name = "valueColumnName",
# version_column_name = "versionColumnName"
# )],
# kafka = ClickhouseConfig.Kafka(
# security_protocol = Kafka.SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
# sasl_mechanism = Kafka.SaslMechanism.SASL_MECHANISM_GSSAPI,
# sasl_username = "saslUsername",
# sasl_password = "saslPassword",
# enable_ssl_certificate_verification = BoolValue(
# value = True
# ),
# max_poll_interval_ms = Int64Value(
# value = 0
# ),
# session_timeout_ms = Int64Value(
# value = 0
# ),
# debug = Kafka.Debug.DEBUG_GENERIC,
# auto_offset_reset = Kafka.AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
# ),
# kafka_topics = [ClickhouseConfig.KafkaTopic(
# name = "name",
# settings = ClickhouseConfig.Kafka(
# security_protocol = Kafka.SecurityProtocol.SECURITY_PROTOCOL_PLAINTEXT,
# sasl_mechanism = Kafka.SaslMechanism.SASL_MECHANISM_GSSAPI,
# sasl_username = "saslUsername",
# sasl_password = "saslPassword",
# enable_ssl_certificate_verification = BoolValue(
# value = True
# ),
# max_poll_interval_ms = Int64Value(
# value = 0
# ),
# session_timeout_ms = Int64Value(
# value = 0
# ),
# debug = Kafka.Debug.DEBUG_GENERIC,
# auto_offset_reset = Kafka.AutoOffsetReset.AUTO_OFFSET_RESET_SMALLEST
# )
# )],
# rabbitmq = ClickhouseConfig.Rabbitmq(
# username = "username",
# password = "password",
# vhost = "vhost"
# ),
# max_connections = Int64Value(
# value = 0
# ),
# max_concurrent_queries = Int64Value(
# value = 0
# ),
# keep_alive_timeout = Int64Value(
# value = 0
# ),
# uncompressed_cache_size = Int64Value(
# value = 0
# ),
# mark_cache_size = Int64Value(
# value = 0
# ),
# max_table_size_to_drop = Int64Value(
# value = 0
# ),
# max_partition_size_to_drop = Int64Value(
# value = 0
# ),
# builtin_dictionaries_reload_interval = Int64Value(
# value = 0
# ),
# timezone = "timezone",
# geobase_enabled = BoolValue(
# value = True
# ),
# geobase_uri = "geobaseUri",
# query_log_retention_size = Int64Value(
# value = 0
# ),
# query_log_retention_time = Int64Value(
# value = 0
# ),
# query_thread_log_enabled = BoolValue(
# value = True
# ),
# query_thread_log_retention_size = Int64Value(
# value = 0
# ),
# query_thread_log_retention_time = Int64Value(
# value = 0
# ),
# part_log_retention_size = Int64Value(
# value = 0
# ),
# part_log_retention_time = Int64Value(
# value = 0
# ),
# metric_log_enabled = BoolValue(
# value = True
# ),
# metric_log_retention_size = Int64Value(
# value = 0
# ),
# metric_log_retention_time = Int64Value(
# value = 0
# ),
# trace_log_enabled = BoolValue(
# value = True
# ),
# trace_log_retention_size = Int64Value(
# value = 0
# ),
# trace_log_retention_time = Int64Value(
# value = 0
# ),
# text_log_enabled = BoolValue(
# value = True
# ),
# text_log_retention_size = Int64Value(
# value = 0
# ),
# text_log_retention_time = Int64Value(
# value = 0
# ),
# text_log_level = ClickhouseConfig.LogLevel.TRACE,
# opentelemetry_span_log_enabled = BoolValue(
# value = True
# ),
# opentelemetry_span_log_retention_size = Int64Value(
# value = 0
# ),
# opentelemetry_span_log_retention_time = Int64Value(
# value = 0
# ),
# query_views_log_enabled = BoolValue(
# value = True
# ),
# query_views_log_retention_size = Int64Value(
# value = 0
# ),
# query_views_log_retention_time = Int64Value(
# value = 0
# ),
# asynchronous_metric_log_enabled = BoolValue(
# value = True
# ),
# asynchronous_metric_log_retention_size = Int64Value(
# value = 0
# ),
# asynchronous_metric_log_retention_time = Int64Value(
# value = 0
# ),
# session_log_enabled = BoolValue(
# value = True
# ),
# session_log_retention_size = Int64Value(
# value = 0
# ),
# session_log_retention_time = Int64Value(
# value = 0
# ),
# zookeeper_log_enabled = BoolValue(
# value = True
# ),
# zookeeper_log_retention_size = Int64Value(
# value = 0
# ),
# zookeeper_log_retention_time = Int64Value(
# value = 0
# ),
# asynchronous_insert_log_enabled = BoolValue(
# value = True
# ),
# asynchronous_insert_log_retention_size = Int64Value(
# value = 0
# ),
# asynchronous_insert_log_retention_time = Int64Value(
# value = 0
# ),
# background_pool_size = Int64Value(
# value = 0
# ),
# background_merges_mutations_concurrency_ratio = Int64Value(
# value = 0
# ),
# background_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_fetches_pool_size = Int64Value(
# value = 0
# ),
# background_move_pool_size = Int64Value(
# value = 0
# ),
# background_distributed_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_buffer_flush_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_message_broker_schedule_pool_size = Int64Value(
# value = 0
# ),
# background_common_pool_size = Int64Value(
# value = 0
# ),
# default_database = StringValue(
# value = "value"
# ),
# total_memory_profiler_step = Int64Value(
# value = 0
# ),
# total_memory_tracker_sample_probability = DoubleValue(
# value = 0
# ),
# query_masking_rules = [ClickhouseConfig.QueryMaskingRule(
# name = "name",
# regexp = "regexp",
# replace = "replace"
# )],
# dictionaries_lazy_load = BoolValue(
# value = True
# ),
# query_cache = ClickhouseConfig.QueryCache(
# max_size_in_bytes = Int64Value(
# value = 0
# ),
# max_entries = Int64Value(
# value = 0
# ),
# max_entry_size_in_bytes = Int64Value(
# value = 0
# ),
# max_entry_size_in_rows = Int64Value(
# value = 0
# )
# )
# ),
# resources = Resources(
# resource_preset_id = "resourcePresetId",
# disk_size = 0,
# disk_type_id = "diskTypeId"
# )
# ),
# zookeeper = ConfigSpec.Zookeeper(
# resources = Resources(
# resource_preset_id = "resourcePresetId",
# disk_size = 0,
# disk_type_id = "diskTypeId"
# )
# ),
# backup_window_start = TimeOfDay(
# hours = 0,
# minutes = 0,
# seconds = 0,
# nanos = 0
# ),
# access = Access(
# data_lens = True,
# web_sql = True,
# metrika = True,
# serverless = True,
# data_transfer = True,
# yandex_query = True
# ),
# cloud_storage = CloudStorage(
# enabled = True,
# move_factor = DoubleValue(
# value = 0
# ),
# data_cache_enabled = BoolValue(
# value = True
# ),
# data_cache_max_size = Int64Value(
# value = 0
# ),
# prefer_not_to_merge = BoolValue(
# value = True
# )
# ),
# sql_database_management = BoolValue(
# value = True
# ),
# sql_user_management = BoolValue(
# value = True
# ),
# admin_password = "adminPassword",
# embedded_keeper = BoolValue(
# value = True
# ),
# backup_retain_period_days = Int64Value(
# value = 0
# )
),
# host_specs = [HostSpec(
# zone_id = "zoneId",
# type = Host.Type.CLICKHOUSE,
# subnet_id = "subnetId",
# assign_public_ip = True,
# shard_name = "shardName"
# )],
network_id = "networkId",
# folder_id = "folderId",
# service_account_id = "serviceAccountId",
# security_group_ids = ["securityGroupIds"],
# deletion_protection = True
)
)
operation_result = sdk.wait_operation_and_get_result(
operation,
response_type=Cluster,
meta_type=RestoreClusterMetadata,
)
print(operation_result)
RestoreClusterRequest
backupId
: string
ID of the backup to create a cluster from. To get the backup ID, use a ClusterService.ListBackups request.
name
: string
Name of the new Redis cluster. The name must be unique within the folder.
description
: string
Description of the new Redis cluster.
labels
: string
Custom labels for the Redis cluster as key:value pairs. Maximum 64 per cluster.
For example, "project": "mvp" or "source": "dictionary".
environment
: Cluster.Environment
Deployment environment of the new Redis cluster.
configSpec
: ConfigSpec
Configuration for the Redis cluster to be created.
hostSpecs
: HostSpec
Configurations for the Redis hosts to create in the cluster restored from the backup.
networkId
: string
ID of the network to create the Redis cluster in.
folderId
: string
ID of the folder to create the Redis cluster in.
securityGroupIds
: string
IDs of the user security groups.
tlsEnabled
: google.protobuf.BoolValue
TLS port and functionality on/off.
persistenceMode
: Cluster.PersistenceMode
Persistence mode.
deletionProtection
: bool
Deletion protection prevents the cluster from being deleted.
announceHostnames
: bool
Announce hostnames (FQDNs) instead of IP addresses.
maintenanceWindow
: MaintenanceWindow
Window of maintenance operations.
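The samples above show the general shape of a Restore call. For the Redis fields described in this reference, a minimal call might look like the sketch below; the yandex.cloud.mdb.redis.v1 module paths follow the SDK's usual naming scheme but are assumptions here, and all IDs are placeholders.
import os

import yandexcloud
from yandex.cloud.mdb.redis.v1.cluster_pb2 import Cluster
from yandex.cloud.mdb.redis.v1.cluster_service_pb2 import (
    ConfigSpec,
    HostSpec,
    RestoreClusterRequest,
)
from yandex.cloud.mdb.redis.v1.cluster_service_pb2_grpc import ClusterServiceStub

sdk = yandexcloud.SDK(token=os.getenv("YC_OAUTH_TOKEN"))
service = sdk.client(ClusterServiceStub)

# Only the required fields are filled in; "backup-id", "subnet-id" and
# "network-id" are placeholders, not real resource IDs.
operation = service.Restore(
    RestoreClusterRequest(
        backup_id="backup-id",
        name="restored-cluster",
        environment=Cluster.Environment.PRODUCTION,
        config_spec=ConfigSpec(version="7.0"),
        host_specs=[HostSpec(zone_id="ru-central1-a", subnet_id="subnet-id")],
        network_id="network-id",
    )
)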
ConfigSpec
version
: string
Version of Redis used in the cluster.
One of redisSpec
Configuration of a Redis cluster. Exactly one of the version-specific fields below must be set.
redisConfig_5_0
: config.RedisConfig5_0
redisConfig_6_0
: config.RedisConfig6_0
redisConfig_6_2
: config.RedisConfig6_2
redisConfig_7_0
: config.RedisConfig7_0
resources
: Resources
Resources allocated to Redis hosts.
backupWindowStart
: google.type.TimeOfDay
Time to start the daily backup, in the UTC timezone.
access
: Access
Access policy for the database.
redis
: config.RedisConfig
Unified configuration of a Redis cluster.
diskSizeAutoscaling
: DiskSizeAutoscaling
Disk size autoscaling settings.
HostSpec
zoneId
: string
ID of the availability zone where the host resides. To get a list of available zones, use the yandex.cloud.compute.v1.ZoneService.List request.
subnetId
: string
ID of the subnet that the host should belong to. This subnet should be a part of the network that the cluster belongs to. The ID of the network is set in the field Cluster.network_id.
shardName
: string
ID of the Redis shard the host belongs to. To get the shard ID, use a ClusterService.ListShards request.
replicaPriority
: google.protobuf.Int64Value
A replica with a low priority number is considered better for promotion. A replica with a priority of 0 will never be selected by Redis Sentinel for promotion. Works only for non-sharded clusters. The default value is 100.
assignPublicIp
: bool
Whether the host should get a public IP address on creation.
Possible values:
- false - don't assign a public IP to the host.
- true - the host should have a public IP address.
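As a sketch under the same assumed yandex.cloud.mdb.redis.v1 module paths, a non-sharded replica host that should never be promoted by Redis Sentinel could be declared like this:
from google.protobuf.wrappers_pb2 import Int64Value
from yandex.cloud.mdb.redis.v1.cluster_service_pb2 import HostSpec

# replica_priority = 0 excludes this host from promotion; the default is 100.
replica = HostSpec(
    zone_id="ru-central1-b",
    subnet_id="subnet-id",  # placeholder
    replica_priority=Int64Value(value=0),
    assign_public_ip=False,
)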
MaintenanceWindow
Maintenance window settings.
One of policy
The maintenance policy in effect.
anytime
: AnytimeMaintenanceWindow
Maintenance operation can be scheduled anytime.
weeklyMaintenanceWindow
: WeeklyMaintenanceWindow
Maintenance operation can be scheduled on a weekly basis.
RedisConfig5_0
Fields and structure of RedisConfig reflect Redis configuration file parameters.
MaxmemoryPolicy
MAXMEMORY_POLICY_UNSPECIFIED
VOLATILE_LRU
Try to remove less recently used (LRU) keys with expire set.
ALLKEYS_LRU
Remove less recently used (LRU) keys.
VOLATILE_LFU
Try to remove least frequently used (LFU) keys with expire set.
ALLKEYS_LFU
Remove least frequently used (LFU) keys.
VOLATILE_RANDOM
Try to remove keys with expire set randomly.
ALLKEYS_RANDOM
Remove keys randomly.
VOLATILE_TTL
Try to remove less recently used (LRU) keys with expire set and shorter TTL first.
NOEVICTION
Return errors when the memory limit is reached and commands would require more memory.
ClientOutputBufferLimit
hardLimit
: google.protobuf.Int64Value
Total limit in bytes.
softLimit
: google.protobuf.Int64Value
Limit in bytes during a certain time period.
softSeconds
: google.protobuf.Int64Value
Seconds for soft limit.
maxmemoryPolicy
: MaxmemoryPolicy
Redis key eviction policy for a dataset that reaches the maximum memory available to the host. The Redis maxmemory setting depends on the Managed Service for Redis host class.
All policies are described in detail in Redis documentation.
timeout
: google.protobuf.Int64Value
Time that Redis keeps the connection open while the client is idle. If no new command is sent during that time, the connection is closed.
password
: string
Authentication password.
databases
: google.protobuf.Int64Value
Number of database buckets on a single redis-server process.
slowlogLogSlowerThan
: google.protobuf.Int64Value
Threshold in microseconds for logging slow requests to the server (only requests slower than this are logged).
slowlogMaxLen
: google.protobuf.Int64Value
Maximum number of slow requests to log.
notifyKeyspaceEvents
: string
String setting for pub/sub functionality.
clientOutputBufferLimitPubsub
: ClientOutputBufferLimit
Redis connection output buffers limits for pubsub operations.
clientOutputBufferLimitNormal
: ClientOutputBufferLimit
Redis connection output buffers limits for clients.
RedisConfig6_0
Fields and structure of RedisConfig reflect Redis configuration file parameters.
MaxmemoryPolicy
MAXMEMORY_POLICY_UNSPECIFIED
VOLATILE_LRU
Try to remove less recently used (LRU) keys with expire set.
ALLKEYS_LRU
Remove less recently used (LRU) keys.
VOLATILE_LFU
Try to remove least frequently used (LFU) keys with expire set.
ALLKEYS_LFU
Remove least frequently used (LFU) keys.
VOLATILE_RANDOM
Try to remove keys with expire set randomly.
ALLKEYS_RANDOM
Remove keys randomly.
VOLATILE_TTL
Try to remove less recently used (LRU) keys with expire set and shorter TTL first.
NOEVICTION
Return errors when the memory limit is reached and commands would require more memory.
ClientOutputBufferLimit
hardLimit
: google.protobuf.Int64Value
Total limit in bytes.
softLimit
: google.protobuf.Int64Value
Limit in bytes during a certain time period.
softSeconds
: google.protobuf.Int64Value
Seconds for soft limit.
maxmemoryPolicy
: MaxmemoryPolicy
Redis key eviction policy for a dataset that reaches the maximum memory available to the host. The Redis maxmemory setting depends on the Managed Service for Redis host class.
All policies are described in detail in Redis documentation.
timeout
: google.protobuf.Int64Value
Time that Redis keeps the connection open while the client is idle. If no new command is sent during that time, the connection is closed.
password
: string
Authentication password.
databases
: google.protobuf.Int64Value
Number of database buckets on a single redis-server process.
slowlogLogSlowerThan
: google.protobuf.Int64Value
Threshold in microseconds for logging slow requests to the server (only requests slower than this are logged).
slowlogMaxLen
: google.protobuf.Int64Value
Maximum number of slow requests to log.
notifyKeyspaceEvents
: string
String setting for pub/sub functionality.
clientOutputBufferLimitPubsub
: ClientOutputBufferLimit
Redis connection output buffers limits for pubsub operations.
clientOutputBufferLimitNormal
: ClientOutputBufferLimit
Redis connection output buffers limits for clients.
RedisConfig6_2
Fields and structure of RedisConfig reflect Redis configuration file parameters.
MaxmemoryPolicy
MAXMEMORY_POLICY_UNSPECIFIED
VOLATILE_LRU
Try to remove less recently used (LRU) keys with expire set.
ALLKEYS_LRU
Remove less recently used (LRU) keys.
VOLATILE_LFU
Try to remove least frequently used (LFU) keys with expire set.
ALLKEYS_LFU
Remove least frequently used (LFU) keys.
VOLATILE_RANDOM
Try to remove keys with expire set randomly.
ALLKEYS_RANDOM
Remove keys randomly.
VOLATILE_TTL
Try to remove less recently used (LRU) keys with expire set and shorter TTL first.
NOEVICTION
Return errors when the memory limit is reached and commands would require more memory.
ClientOutputBufferLimit
hardLimit
: google.protobuf.Int64Value
Total limit in bytes.
softLimit
: google.protobuf.Int64Value
Limit in bytes during a certain time period.
softSeconds
: google.protobuf.Int64Value
Seconds for soft limit.
maxmemoryPolicy
: MaxmemoryPolicy
Redis key eviction policy for a dataset that reaches the maximum memory available to the host. The Redis maxmemory setting depends on the Managed Service for Redis host class.
All policies are described in detail in Redis documentation.
timeout
: google.protobuf.Int64Value
Time that Redis keeps the connection open while the client is idle. If no new command is sent during that time, the connection is closed.
password
: string
Authentication password.
databases
: google.protobuf.Int64Value
Number of database buckets on a single redis-server process.
slowlogLogSlowerThan
: google.protobuf.Int64Value
Threshold in microseconds for logging slow requests to the server (only requests slower than this are logged).
slowlogMaxLen
: google.protobuf.Int64Value
Maximum number of slow requests to log.
notifyKeyspaceEvents
: string
String setting for pub/sub functionality.
clientOutputBufferLimitPubsub
: ClientOutputBufferLimit
Redis connection output buffers limits for pubsub operations.
clientOutputBufferLimitNormal
: ClientOutputBufferLimit
Redis connection output buffers limits for clients.
maxmemoryPercent
: google.protobuf.Int64Value
Redis maxmemory setting, as a percentage of the memory available to the host.
RedisConfig7_0
Fields and structure of RedisConfig reflect Redis configuration file parameters.
MaxmemoryPolicy
MAXMEMORY_POLICY_UNSPECIFIED
VOLATILE_LRU
Try to remove less recently used (LRU) keys with expire set.
ALLKEYS_LRU
Remove less recently used (LRU) keys.
VOLATILE_LFU
Try to remove least frequently used (LFU) keys with expire set.
ALLKEYS_LFU
Remove least frequently used (LFU) keys.
VOLATILE_RANDOM
Try to remove keys with expire set randomly.
ALLKEYS_RANDOM
Remove keys randomly.
VOLATILE_TTL
Try to remove less recently used (LRU) keys with expire set and shorter TTL first.
NOEVICTION
Return errors when the memory limit is reached and commands would require more memory.
ClientOutputBufferLimit
hardLimit
: google.protobuf.Int64Value
Total limit in bytes.
softLimit
: google.protobuf.Int64Value
Limit in bytes during a certain time period.
softSeconds
: google.protobuf.Int64Value
Seconds for soft limit.
maxmemoryPolicy
: MaxmemoryPolicy
Redis key eviction policy for a dataset that reaches the maximum memory available to the host. The Redis maxmemory setting depends on the Managed Service for Redis host class.
All policies are described in detail in Redis documentation.
timeout
: google.protobuf.Int64Value
Time that Redis keeps the connection open while the client is idle. If no new command is sent during that time, the connection is closed.
password
: string
Authentication password.
databases
: google.protobuf.Int64Value
Number of database buckets on a single redis-server process.
slowlogLogSlowerThan
: google.protobuf.Int64Value
Threshold in microseconds for logging slow requests to the server (only requests slower than this are logged).
slowlogMaxLen
: google.protobuf.Int64Value
Maximum number of slow requests to log.
notifyKeyspaceEvents
: string
String setting for pub/sub functionality.
clientOutputBufferLimitPubsub
: ClientOutputBufferLimit
Redis connection output buffers limits for pubsub operations.
clientOutputBufferLimitNormal
: ClientOutputBufferLimit
Redis connection output buffers limits for clients.
maxmemoryPercent
: google.protobuf.Int64Value
Redis maxmemory setting, as a percentage of the memory available to the host.
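As an illustration of these fields, here is a hedged sketch of a RedisConfig7_0 with an eviction policy, a slowlog threshold, and pub/sub output buffer limits; the redis7_0_pb2 module path is assumed from the SDK's naming scheme:
from google.protobuf.wrappers_pb2 import Int64Value
from yandex.cloud.mdb.redis.v1.config.redis7_0_pb2 import RedisConfig7_0

redis_config = RedisConfig7_0(
    maxmemory_policy=RedisConfig7_0.MaxmemoryPolicy.ALLKEYS_LRU,
    slowlog_log_slower_than=Int64Value(value=10000),  # microseconds
    client_output_buffer_limit_pubsub=RedisConfig7_0.ClientOutputBufferLimit(
        hard_limit=Int64Value(value=33554432),  # 32 MiB
        soft_limit=Int64Value(value=8388608),   # 8 MiB
        soft_seconds=Int64Value(value=60),
    ),
)
Such a message would be passed as the redis_config_7_0 field of ConfigSpec, satisfying the one-of redisSpec constraint.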
Resources
resourcePresetId
: string
ID of the preset for computational resources available to a host (CPU, memory etc.). All available presets are listed in the documentation.
diskSize
: int64
Volume of the storage available to a host, in bytes.
diskTypeId
: string
Type of the storage environment for the host. Possible values:
- network-ssd - network SSD drive,
- local-ssd - local SSD storage.
Access
dataLens
: bool
Allow access for DataLens.
webSql
: bool
Allow access for Web SQL.
RedisConfig
Fields and structure of RedisConfig reflect Redis configuration file parameters.
MaxmemoryPolicy
MAXMEMORY_POLICY_UNSPECIFIED
VOLATILE_LRU
Try to remove less recently used (LRU) keys with expire set.
ALLKEYS_LRU
Remove less recently used (LRU) keys.
VOLATILE_LFU
Try to remove least frequently used (LFU) keys with expire set.
ALLKEYS_LFU
Remove least frequently used (LFU) keys.
VOLATILE_RANDOM
Try to remove keys with expire set randomly.
ALLKEYS_RANDOM
Remove keys randomly.
VOLATILE_TTL
Try to remove less recently used (LRU) keys with expire set and shorter TTL first.
NOEVICTION
Return errors when the memory limit is reached and commands would require more memory.
ClientOutputBufferLimit
hardLimit
: google.protobuf.Int64Value
Total limit in bytes.
softLimit
: google.protobuf.Int64Value
Limit in bytes during a certain time period.
softSeconds
: google.protobuf.Int64Value
Seconds for soft limit.
maxmemoryPolicy
: MaxmemoryPolicy
Redis key eviction policy for a dataset that reaches the maximum memory available to the host. The Redis maxmemory setting depends on the Managed Service for Redis host class.
All policies are described in detail in Redis documentation.
timeout
: google.protobuf.Int64Value
Time that Redis keeps the connection open while the client is idle. If no new command is sent during that time, the connection is closed.
password
: string
Authentication password.
databases
: google.protobuf.Int64Value
Number of database buckets on a single redis-server process.
slowlogLogSlowerThan
: google.protobuf.Int64Value
Threshold in microseconds for logging slow requests to the server (only requests slower than this are logged).
slowlogMaxLen
: google.protobuf.Int64Value
Maximum number of slow requests to log.
notifyKeyspaceEvents
: string
String setting for pub/sub functionality.
clientOutputBufferLimitPubsub
: ClientOutputBufferLimit
Redis connection output buffers limits for pubsub operations.
clientOutputBufferLimitNormal
: ClientOutputBufferLimit
Redis connection output buffers limits for clients.
maxmemoryPercent
: google.protobuf.Int64Value
Redis maxmemory setting, as a percentage of the memory available to the host.
luaTimeLimit
: google.protobuf.Int64Value
Maximum execution time for Lua scripts, in milliseconds; 0 disables the limit.
replBacklogSizePercent
: google.protobuf.Int64Value
Replication backlog size, as a percentage of the flavor's maxmemory.
clusterRequireFullCoverage
: google.protobuf.BoolValue
Controls whether all hash slots must be covered by nodes.
clusterAllowReadsWhenDown
: google.protobuf.BoolValue
Allows read operations when the cluster is down.
clusterAllowPubsubshardWhenDown
: google.protobuf.BoolValue
Permits Pub/Sub shard operations when the cluster is down.
lfuDecayTime
: google.protobuf.Int64Value
The time, in minutes, that must elapse for the key counter to be halved (or decremented, if its value is less than or equal to 10).
lfuLogFactor
: google.protobuf.Int64Value
Determines how the frequency counter represents key hits.
turnBeforeSwitchover
: google.protobuf.BoolValue
Allows turning before a switchover in RDSync.
allowDataLoss
: google.protobuf.BoolValue
Allows some data to be lost in favor of a faster switchover/restart.
DiskSizeAutoscaling
plannedUsageThreshold
: google.protobuf.Int64Value
Threshold of used storage, in percent, that triggers automatic disk scaling during the maintenance window; 0 means disabled.
emergencyUsageThreshold
: google.protobuf.Int64Value
Threshold of used storage, in percent, that triggers immediate automatic disk scaling; 0 means disabled.
diskSizeLimit
: google.protobuf.Int64Value
Limit on how large the storage for database instances can automatically grow, in bytes.
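For example (a sketch assuming DiskSizeAutoscaling lives in cluster_pb2, per the SDK's usual layout), autoscaling at 80% planned and 90% emergency usage, capped at 100 GiB:
from google.protobuf.wrappers_pb2 import Int64Value
from yandex.cloud.mdb.redis.v1.cluster_pb2 import DiskSizeAutoscaling

autoscaling = DiskSizeAutoscaling(
    planned_usage_threshold=Int64Value(value=80),    # percent
    emergency_usage_threshold=Int64Value(value=90),  # percent
    disk_size_limit=Int64Value(value=100 * 2**30),   # bytes (100 GiB)
)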
AnytimeMaintenanceWindow
WeeklyMaintenanceWindow
Weekly maintenance window settings.
WeekDay
WEEK_DAY_UNSPECIFIED
MON
TUE
WED
THU
FRI
SAT
SUN
day
: WeekDay
Day of the week (in DDD format).
hour
: int64
Hour of the day in UTC (in HH format).
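A sketch of a weekly window (assuming the maintenance messages live in maintenance_pb2, as in other mdb services):
from yandex.cloud.mdb.redis.v1.maintenance_pb2 import (
    MaintenanceWindow,
    WeeklyMaintenanceWindow,
)

# Schedule maintenance for Mondays in the 02:00 UTC hour.
window = MaintenanceWindow(
    weekly_maintenance_window=WeeklyMaintenanceWindow(
        day=WeeklyMaintenanceWindow.WeekDay.MON,
        hour=2,
    )
)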
ClientOutputBufferLimit
hardLimit
: google.protobuf.Int64Value
Total limit in bytes.
softLimit
: google.protobuf.Int64Value
Limit in bytes during a certain time period.
softSeconds
: google.protobuf.Int64Value
Seconds for soft limit.
Operation
An Operation resource. For more information, see Operation.
id
: string
ID of the operation.
description
: string
Description of the operation. 0-256 characters long.
createdAt
: google.protobuf.Timestamp
Creation timestamp.
createdBy
: string
ID of the user or service account who initiated the operation.
modifiedAt
: google.protobuf.Timestamp
The time when the Operation resource was last modified.
done
: bool
If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
metadata
: google.protobuf.Any
Service-specific metadata associated with the operation. It typically contains the ID of the target resource that the operation is performed on. Any method that returns a long-running operation should document the metadata type, if any.
One of result
The operation result.
If done == false and there was no failure detected, neither error nor response is set.
If done == false and there was a failure detected, error is set.
If done == true, exactly one of error or response is set.
error
: google.rpc.Status
The error result of the operation in case of failure or cancellation.
response
: google.protobuf.Any
The normal response of the operation in case of success.
If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is the standard Create/Update, the response should be the target resource of the operation. Any method that returns a long-running operation should document the response type, if any.
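When handling the returned Operation directly rather than through sdk.wait_operation_and_get_result as in the samples above, the done/error/response contract can be checked along these lines (a sketch; the operation_pb2 path is the SDK's standard one):
from yandex.cloud.operation.operation_pb2 import Operation
from yandex.cloud.mdb.redis.v1.cluster_pb2 import Cluster

def unpack_cluster(op: Operation) -> Cluster:
    if not op.done:
        raise RuntimeError("operation is still in progress")
    if op.HasField("error"):
        # error is a google.rpc.Status with code/message/details
        raise RuntimeError(f"operation failed: {op.error.message}")
    cluster = Cluster()
    op.response.Unpack(cluster)  # response is a google.protobuf.Any
    return cluster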