Update
Updates the specified Elasticsearch cluster.
TypeScript
import {
  cloudApi,
  decodeMessage,
  serviceClients,
  Session,
  waitForOperation,
} from "@yandex-cloud/nodejs-sdk";

const Cluster = cloudApi.dataproc.cluster.Cluster;
const HadoopConfig_Service = cloudApi.dataproc.cluster.HadoopConfig_Service;
const UpdateClusterRequest =
  cloudApi.dataproc.cluster_service.UpdateClusterRequest;

(async () => {
  const authToken = process.env["YC_OAUTH_TOKEN"];
  const session = new Session({ oauthToken: authToken });
  const client = session.client(serviceClients.ClusterServiceClient);

  const operation = await client.update(
    UpdateClusterRequest.fromPartial({
      // clusterId: "clusterId",
      // updateMask: {
      //   paths: ["paths"]
      // },
      // description: "description",
      // labels: {"key": "labels"},
      // configSpec: {
      //   subclustersSpec: [{
      //     id: "id",
      //     name: "name",
      //     resources: {
      //       resourcePresetId: "resourcePresetId",
      //       diskTypeId: "diskTypeId",
      //       diskSize: 0
      //     },
      //     hostsCount: 0,
      //     autoscalingConfig: {
      //       maxHostsCount: 0,
      //       preemptible: true,
      //       measurementDuration: {
      //         seconds: 0,
      //         nanos: 0
      //       },
      //       warmupDuration: {
      //         seconds: 0,
      //         nanos: 0
      //       },
      //       stabilizationDuration: {
      //         seconds: 0,
      //         nanos: 0
      //       },
      //       cpuUtilizationTarget: 0,
      //       decommissionTimeout: 0
      //     }
      //   }],
      //   hadoop: {
      //     services: [HadoopConfig_Service.HDFS],
      //     properties: {"key": "properties"},
      //     sshPublicKeys: ["sshPublicKeys"],
      //     initializationActions: [{
      //       uri: "uri",
      //       args: ["args"],
      //       timeout: 0
      //     }]
      //   }
      // },
      // name: "name",
      // serviceAccountId: "serviceAccountId",
      // bucket: "bucket",
      // decommissionTimeout: 0,
      // uiProxy: true,
      // securityGroupIds: ["securityGroupIds"],
      // deletionProtection: true,
      // logGroupId: "logGroupId"
    })
  );
  const finishedOp = await waitForOperation(operation, session);

  if (finishedOp.response) {
    const result = decodeMessage<typeof Cluster>(finishedOp.response);
    console.log(result);
  }
})();
Python

import os

import grpc
import yandexcloud

from google.protobuf.duration_pb2 import Duration
from google.protobuf.field_mask_pb2 import FieldMask
from yandex.cloud.dataproc.v1.subcluster_pb2 import AutoscalingConfig
from yandex.cloud.dataproc.v1.cluster_pb2 import Cluster
from yandex.cloud.dataproc.v1.cluster_service_pb2_grpc import ClusterServiceStub
from yandex.cloud.dataproc.v1.cluster_pb2 import HadoopConfig
from yandex.cloud.dataproc.v1.cluster_pb2 import InitializationAction
from yandex.cloud.dataproc.v1.common_pb2 import Resources
from yandex.cloud.dataproc.v1.cluster_service_pb2 import UpdateClusterConfigSpec
from yandex.cloud.dataproc.v1.cluster_service_pb2 import UpdateClusterMetadata
from yandex.cloud.dataproc.v1.cluster_service_pb2 import UpdateClusterRequest
from yandex.cloud.dataproc.v1.cluster_service_pb2 import UpdateSubclusterConfigSpec

token = os.getenv("YC_OAUTH_TOKEN")
sdk = yandexcloud.SDK(token=token)

service = sdk.client(ClusterServiceStub)
operation = service.Update(
    UpdateClusterRequest(
        # cluster_id = "clusterId",
        # update_mask = FieldMask.FromJsonString("field1,field2"),
        # description = "description",
        # labels = {"key": "labels"},
        # config_spec = UpdateClusterConfigSpec(
        #     subclusters_spec = [UpdateSubclusterConfigSpec(
        #         id = "id",
        #         name = "name",
        #         resources = Resources(
        #             resource_preset_id = "resourcePresetId",
        #             disk_type_id = "diskTypeId",
        #             disk_size = 0
        #         ),
        #         hosts_count = 0,
        #         autoscaling_config = AutoscalingConfig(
        #             max_hosts_count = 0,
        #             preemptible = True,
        #             measurement_duration = Duration(
        #                 seconds = 0,
        #                 nanos = 0
        #             ),
        #             warmup_duration = Duration(
        #                 seconds = 0,
        #                 nanos = 0
        #             ),
        #             stabilization_duration = Duration(
        #                 seconds = 0,
        #                 nanos = 0
        #             ),
        #             cpu_utilization_target = 0,
        #             decommission_timeout = 0
        #         )
        #     )],
        #     hadoop = HadoopConfig(
        #         services = [HadoopConfig.Service.HDFS],
        #         properties = {"key": "properties"},
        #         ssh_public_keys = ["sshPublicKeys"],
        #         initialization_actions = [InitializationAction(
        #             uri = "uri",
        #             args = ["args"],
        #             timeout = 0
        #         )]
        #     )
        # ),
        # name = "name",
        # service_account_id = "serviceAccountId",
        # bucket = "bucket",
        # decommission_timeout = 0,
        # ui_proxy = True,
        # security_group_ids = ["securityGroupIds"],
        # deletion_protection = True,
        # log_group_id = "logGroupId"
    )
)
operation_result = sdk.wait_operation_and_get_result(
    operation,
    response_type=Cluster,
    meta_type=UpdateClusterMetadata,
)

print(operation_result)
UpdateClusterRequest
clusterId
: string
ID of the Elasticsearch cluster to update.
To get the Elasticsearch cluster ID, make a ClusterService.List request.
updateMask
: google.protobuf.FieldMask
description
: string
New description of the Elasticsearch cluster.
labels
: string
Custom labels for the Elasticsearch cluster as key:value pairs.
For example, "project": "mvp" or "source": "dictionary".
The new set of labels will completely replace the old ones. To add a label, request the current set with the ClusterService.Get method, then send a ClusterService.Update request with the new label added to the set (see the sketch after this request description).
configSpec
: ConfigSpecUpdate
New configuration and resources for hosts in the Elasticsearch cluster.
Use update_mask to prevent reverting all cluster settings that are not listed in config_spec to their default values.
name
: string
New name for the Elasticsearch cluster.
securityGroupIds
: string
User security groups.
serviceAccountId
: string
ID of the service account used for access to Object Storage.
deletionProtection
: bool
Deletion protection inhibits deletion of the cluster.
maintenanceWindow
: MaintenanceWindow
Window of maintenance operations.
networkId
: string
ID of the network to move the cluster to.
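As noted for labels above, the update replaces the whole label set, and update_mask controls which fields are touched at all. The following is a minimal Python sketch of that read-modify-write flow; it assumes the yandex.cloud.mdb.elasticsearch.v1 protos bundled with the yandexcloud package, and the cluster ID and label values are placeholders.

import os

import yandexcloud
from google.protobuf.field_mask_pb2 import FieldMask
from yandex.cloud.mdb.elasticsearch.v1.cluster_pb2 import Cluster
from yandex.cloud.mdb.elasticsearch.v1.cluster_service_pb2 import (
    GetClusterRequest,
    UpdateClusterMetadata,
    UpdateClusterRequest,
)
from yandex.cloud.mdb.elasticsearch.v1.cluster_service_pb2_grpc import ClusterServiceStub

sdk = yandexcloud.SDK(token=os.getenv("YC_OAUTH_TOKEN"))
service = sdk.client(ClusterServiceStub)

cluster_id = "my-cluster-id"  # placeholder; take the real ID from ClusterService.List

# Fetch the current label set, add a label, and send the full set back.
current = service.Get(GetClusterRequest(cluster_id=cluster_id))
labels = dict(current.labels)
labels["source"] = "dictionary"

operation = service.Update(
    UpdateClusterRequest(
        cluster_id=cluster_id,
        # Restrict the update to labels so the other settings keep their current values.
        update_mask=FieldMask(paths=["labels"]),
        labels=labels,
    )
)
result = sdk.wait_operation_and_get_result(
    operation,
    response_type=Cluster,
    meta_type=UpdateClusterMetadata,
)
print(result)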
ConfigSpecUpdate
version
: string
Elasticsearch version.
elasticsearchSpec
: ElasticsearchSpec
Configuration and resource allocation for Elasticsearch nodes.
edition
: string
Elasticsearch edition.
adminPassword
: string
Elasticsearch admin password.
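A minimal sketch of building this message, assuming ConfigSpecUpdate is generated in cluster_service_pb2 of the same yandex.cloud.mdb.elasticsearch.v1 package; the version, edition, and password values are placeholders.

from yandex.cloud.mdb.elasticsearch.v1.cluster_service_pb2 import ConfigSpecUpdate

# Pass this as config_spec in UpdateClusterRequest and list the changed paths
# (for example "config_spec.version") in update_mask.
config_spec = ConfigSpecUpdate(
    version="7.17",                       # placeholder target Elasticsearch version
    edition="platinum",                   # placeholder Elasticsearch edition
    admin_password="change-me-password",  # placeholder; keep real passwords in a secret store
)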
MaintenanceWindow
One of policy
anytime
: AnytimeMaintenanceWindow
weeklyMaintenanceWindow
: WeeklyMaintenanceWindow
ElasticsearchSpec
dataNode
: DataNode
Configuration and resource allocation for Elasticsearch data nodes.
masterNode
: MasterNode
Configuration and resource allocation for Elasticsearch master nodes.
plugins
: string
Cluster-wide plugins.
AnytimeMaintenanceWindow
WeeklyMaintenanceWindow
WeekDay
WEEK_DAY_UNSPECIFIED
MON
TUE
WED
THU
FRI
SAT
SUN
day
: WeekDay
hour
: int64
Hour of the day in UTC.
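A minimal sketch of pinning maintenance to a weekly slot, assuming these messages are generated in maintenance_pb2 of the same package.

from yandex.cloud.mdb.elasticsearch.v1.maintenance_pb2 import (
    AnytimeMaintenanceWindow,
    MaintenanceWindow,
    WeeklyMaintenanceWindow,
)

# Run maintenance on Saturdays at 22:00 UTC.
window = MaintenanceWindow(
    weekly_maintenance_window=WeeklyMaintenanceWindow(
        day=WeeklyMaintenanceWindow.SAT,  # WeekDay enum value
        hour=22,                          # hour of the day in UTC
    )
)
# The other branch of the policy oneof allows maintenance at any time:
anytime_window = MaintenanceWindow(anytime=AnytimeMaintenanceWindow())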
ElasticsearchConfig7
Supported Elasticsearch 7.x configuration options are listed here.
A detailed description of each option is available in the Elasticsearch documentation.
Any options that are not listed here are not supported.
maxClauseCount
: google.protobuf.Int64Value
The maximum number of clauses a boolean query can contain.
The limit is in place to prevent searches from becoming too large and taking up too much CPU and memory.
It affects not only Elasticsearch's bool query, but many other queries that are implicitly converted to bool queries by Elasticsearch.
Default value: 1024.
See in-depth description in Elasticsearch documentation.
fielddataCacheSize
: string
The maximum percentage or absolute value (10%, 512mb) of heap space that is allocated to the field data cache.
All the field values that are placed in this cache get loaded into memory to provide fast document-based access to those values. Building the field data cache for a field can be an expensive operation, so it is recommended to have enough memory for this cache and to keep it loaded.
Default value: unbounded.
See in-depth description in Elasticsearch documentation.
reindexRemoteWhitelist
: string
Remote hosts for reindex have to be explicitly allowed in elasticsearch.yml using the reindex.remote.whitelist property. It can be set to a comma-delimited list of allowed remote host and port combinations. The scheme is ignored; only the host and port are used.
reindexSslCaPath
: string
List of paths to PEM-encoded certificate files that should be trusted.
See in-depth description in Elasticsearch documentation.
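A minimal sketch of setting these options, assuming the message is generated under config/elasticsearch_pb2 in the same package; all values are placeholders.

from google.protobuf.wrappers_pb2 import Int64Value
from yandex.cloud.mdb.elasticsearch.v1.config.elasticsearch_pb2 import ElasticsearchConfig7

# Raise the boolean-query clause limit and cap the field data cache.
es_config = ElasticsearchConfig7(
    max_clause_count=Int64Value(value=2048),    # default is 1024
    fielddata_cache_size="512mb",               # percentage ("10%") or absolute value
    reindex_remote_whitelist="otherhost:9200",  # comma-delimited host:port list
)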
Resources
Computational resources.
resourcePresetId
: string
ID of the preset for computational resources available to a host (CPU, memory etc.). All available presets are listed in the documentation.
diskSize
: int64
Volume of the storage available to a host, in bytes.
diskTypeId
: string
Type of the storage environment for the host. All available types are listed in the documentation.
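Since diskSize is expressed in bytes, a sketch like the following converts from gibibytes explicitly; it assumes Resources is generated in cluster_pb2 of the same package, and the preset and disk type IDs are placeholders.

from yandex.cloud.mdb.elasticsearch.v1.cluster_pb2 import Resources

resources = Resources(
    resource_preset_id="s2.micro",   # placeholder preset ID; see the documentation for available presets
    disk_type_id="network-ssd",      # placeholder disk type ID
    disk_size=100 * 2**30,           # 100 GiB expressed in bytes
)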
DataNode
One of config
Elasticsearch data node configuration.
elasticsearchConfig_7
: config.ElasticsearchConfig7
resources
: Resources
Resources allocated to Elasticsearch data nodes.
MasterNode
resources
: Resources
Resources allocated to Elasticsearch master nodes.
Operation
An Operation resource. For more information, see Operation.
id
: string
ID of the operation.
description
: string
Description of the operation. 0-256 characters long.
createdAt
: google.protobuf.Timestamp
Creation timestamp.
createdBy
: string
ID of the user or service account who initiated the operation.
modifiedAt
: google.protobuf.Timestamp
The time when the Operation resource was last modified.
done
: bool
If the value is false, the operation is still in progress.
If true, the operation is completed, and either error or response is available.
metadata
: google.protobuf.Any
Service-specific metadata associated with the operation. It typically contains the ID of the target resource that the operation is performed on. Any method that returns a long-running operation should document the metadata type, if any.
One of result
The operation result.
If done == false and there was no failure detected, neither error nor response is set.
If done == false and there was a failure detected, error is set.
If done == true, exactly one of error or response is set.
error
: google.rpc.Status
The error result of the operation in case of failure or cancellation.
response
: google.protobuf.Any
The normal response of the operation in case of success.
If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is the standard Create/Update, the response should be the target resource of the operation. Any method that returns a long-running operation should document the response type, if any.
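A minimal sketch of inspecting a returned Operation by hand, following the result semantics above; the SDK helper wait_operation_and_get_result used in the samples performs the same checks for you.

from yandex.cloud.mdb.elasticsearch.v1.cluster_pb2 import Cluster
from yandex.cloud.operation.operation_pb2 import Operation


def read_result(operation: Operation) -> Cluster:
    """Return the updated Cluster once the operation succeeds, otherwise raise."""
    if not operation.done:
        raise RuntimeError("operation %s is still in progress" % operation.id)
    if operation.WhichOneof("result") == "error":
        raise RuntimeError(
            "operation failed: %s %s" % (operation.error.code, operation.error.message)
        )
    cluster = Cluster()
    operation.response.Unpack(cluster)  # response is packed as google.protobuf.Any
    return cluster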