Create
Creates a new Apache Kafka® connector in a cluster.
- TypeScript
- Python
import {
  cloudApi,
  decodeMessage,
  serviceClients,
  Session,
  waitForOperation,
} from "@yandex-cloud/nodejs-sdk";

const Connector = cloudApi.mdb.kafka_connector.Connector;
const CreateConnectorRequest =
  cloudApi.mdb.kafka_connector_service.CreateConnectorRequest;

// Creates an Apache Kafka® connector in a cluster and waits for the
// long-running operation to finish, then prints the decoded Connector.
async function main() {
  // Authenticate with an OAuth token taken from the environment.
  const oauthToken = process.env["YC_OAUTH_TOKEN"];
  const session = new Session({ oauthToken });
  const connectorService = session.client(
    serviceClients.ConnectorServiceClient
  );

  // Build the request; uncomment and fill in the spec fields as needed.
  const request = CreateConnectorRequest.fromPartial({
    clusterId: "clusterId",
    connectorSpec: {
      // name: "name",
      // tasksMax: {
      //   value: 0
      // },
      // properties: {"key": "properties"},
      // connectorConfigMirrormaker: {
      //   sourceCluster: {
      //     alias: "alias",
      //     thisCluster: {
      //     },
      //     externalCluster: {
      //       bootstrapServers: "bootstrapServers",
      //       saslUsername: "saslUsername",
      //       saslPassword: "saslPassword",
      //       saslMechanism: "saslMechanism",
      //       securityProtocol: "securityProtocol",
      //       sslTruststoreCertificates: "sslTruststoreCertificates"
      //     }
      //   },
      //   targetCluster: {
      //     alias: "alias",
      //     thisCluster: {
      //     },
      //     externalCluster: {
      //       bootstrapServers: "bootstrapServers",
      //       saslUsername: "saslUsername",
      //       saslPassword: "saslPassword",
      //       saslMechanism: "saslMechanism",
      //       securityProtocol: "securityProtocol",
      //       sslTruststoreCertificates: "sslTruststoreCertificates"
      //     }
      //   },
      //   topics: "topics",
      //   replicationFactor: {
      //     value: 0
      //   }
      // },
      // connectorConfigS3Sink: {
      //   topics: "topics",
      //   fileCompressionType: "fileCompressionType",
      //   fileMaxRecords: {
      //     value: 0
      //   },
      //   s3Connection: {
      //     bucketName: "bucketName",
      //     externalS3: {
      //       accessKeyId: "accessKeyId",
      //       secretAccessKey: "secretAccessKey",
      //       endpoint: "endpoint",
      //       region: "region"
      //     }
      //   }
      // }
    },
  });

  const operation = await connectorService.create(request);

  // Wait until the operation completes; on success the response payload
  // is the created Connector message.
  const finishedOp = await waitForOperation(operation, session);
  if (finishedOp.response) {
    console.log(decodeMessage<typeof Connector>(finishedOp.response));
  }
}

main();
import os
import grpc
import yandexcloud

# Int64Value is referenced by the commented-out spec fields below
# (tasks_max, replication_factor, file_max_records); imported here so the
# sample still runs when those lines are uncommented.
from google.protobuf.wrappers_pb2 import Int64Value
from yandex.cloud.mdb.kafka.v1.connector_pb2 import ClusterConnectionSpec
from yandex.cloud.mdb.kafka.v1.connector_pb2 import Connector
from yandex.cloud.mdb.kafka.v1.connector_pb2 import ConnectorConfigMirrorMakerSpec
from yandex.cloud.mdb.kafka.v1.connector_pb2 import ConnectorConfigS3SinkSpec
from yandex.cloud.mdb.kafka.v1.connector_service_pb2_grpc import ConnectorServiceStub
from yandex.cloud.mdb.kafka.v1.connector_pb2 import ConnectorSpec
from yandex.cloud.mdb.kafka.v1.connector_service_pb2 import CreateConnectorMetadata
from yandex.cloud.mdb.kafka.v1.connector_service_pb2 import CreateConnectorRequest
from yandex.cloud.mdb.kafka.v1.connector_pb2 import ExternalClusterConnectionSpec
from yandex.cloud.mdb.kafka.v1.connector_pb2 import ExternalS3StorageSpec
from yandex.cloud.mdb.kafka.v1.connector_pb2 import S3ConnectionSpec
from yandex.cloud.mdb.kafka.v1.connector_pb2 import ThisClusterSpec

# Creates an Apache Kafka® connector in a cluster, waits for the
# long-running operation to complete, and prints the resulting Connector.

# Authenticate with an OAuth token taken from the environment.
token = os.getenv("YC_OAUTH_TOKEN")
sdk = yandexcloud.SDK(token=token)
service = sdk.client(ConnectorServiceStub)

# Build the request; uncomment and fill in the spec fields as needed.
operation = service.Create(
    CreateConnectorRequest(
        cluster_id="clusterId",
        connector_spec=ConnectorSpec(
            # name = "name",
            # tasks_max = Int64Value(
            #     value = 0
            # ),
            # properties = {"key": "properties"},
            # connector_config_mirrormaker = ConnectorConfigMirrorMakerSpec(
            #     source_cluster = ClusterConnectionSpec(
            #         alias = "alias",
            #         this_cluster = ThisClusterSpec(
            #         ),
            #         external_cluster = ExternalClusterConnectionSpec(
            #             bootstrap_servers = "bootstrapServers",
            #             sasl_username = "saslUsername",
            #             sasl_password = "saslPassword",
            #             sasl_mechanism = "saslMechanism",
            #             security_protocol = "securityProtocol",
            #             ssl_truststore_certificates = "sslTruststoreCertificates"
            #         )
            #     ),
            #     target_cluster = ClusterConnectionSpec(
            #         alias = "alias",
            #         this_cluster = ThisClusterSpec(
            #         ),
            #         external_cluster = ExternalClusterConnectionSpec(
            #             bootstrap_servers = "bootstrapServers",
            #             sasl_username = "saslUsername",
            #             sasl_password = "saslPassword",
            #             sasl_mechanism = "saslMechanism",
            #             security_protocol = "securityProtocol",
            #             ssl_truststore_certificates = "sslTruststoreCertificates"
            #         )
            #     ),
            #     topics = "topics",
            #     replication_factor = Int64Value(
            #         value = 0
            #     )
            # ),
            # connector_config_s_3_sink = ConnectorConfigS3SinkSpec(
            #     topics = "topics",
            #     file_compression_type = "fileCompressionType",
            #     file_max_records = Int64Value(
            #         value = 0
            #     ),
            #     s_3_connection = S3ConnectionSpec(
            #         bucket_name = "bucketName",
            #         external_s_3 = ExternalS3StorageSpec(
            #             access_key_id = "accessKeyId",
            #             secret_access_key = "secretAccessKey",
            #             endpoint = "endpoint",
            #             region = "region"
            #         )
            #     )
            # )
        ),
    )
)

# Block until the operation finishes; on success the response payload is
# the created Connector message.
operation_result = sdk.wait_operation_and_get_result(
    operation,
    response_type=Connector,
    meta_type=CreateConnectorMetadata,
)
print(operation_result)
CreateConnectorRequest
clusterId
: string
ID of the Apache Kafka® cluster to create the connector in.
To get this ID, make a ClusterService.List request.
connectorSpec
: ConnectorSpec
Configuration of the connector to create.
ConnectorSpec
An object that represents an Apache Kafka® connector.
See the documentation for details.
name
: string
Name of the connector.
tasksMax
: google.protobuf.Int64Value
Maximum number of connector tasks. Default value is the number of brokers.
properties
: string
A set of properties passed to Managed Service for Apache Kafka® with the connector configuration.
Example: sync.topics.config.enabled: true
.
One of connectorConfig
Additional settings for the connector.
connectorConfigMirrormaker
: ConnectorConfigMirrorMakerSpec
Configuration of the MirrorMaker connector.
connectorConfigS3Sink
: ConnectorConfigS3SinkSpec
Configuration of S3-Sink connector.
ConnectorConfigMirrorMakerSpec
sourceCluster
: ClusterConnectionSpec
Source cluster configuration for the MirrorMaker connector.
targetCluster
: ClusterConnectionSpec
Target cluster configuration for the MirrorMaker connector.
topics
: string
List of Kafka topics, separated by ,
.
replicationFactor
: google.protobuf.Int64Value
Replication factor for automatically created topics.
ConnectorConfigS3SinkSpec
Specification for Kafka S3-Sink Connector.
topics
: string
List of Kafka topics, separated by ','.
fileCompressionType
: string
The compression type used for files put on S3.
The supported values are: gzip
, snappy
, zstd
, none
.
Optional, the default is none
.
fileMaxRecords
: google.protobuf.Int64Value
Max records per file.
s3Connection
: S3ConnectionSpec
Credentials for connecting to S3 storage.
ClusterConnectionSpec
alias
: string
Alias of cluster connection configuration.
Examples: source
, target
.
One of clusterConnection
Type of connection to Apache Kafka® cluster.
thisCluster
: ThisClusterSpec
Connection configuration of the cluster the connector belongs to. As all credentials are already known, leave this parameter empty.
externalCluster
: ExternalClusterConnectionSpec
Configuration of connection to an external cluster with all the necessary credentials.
S3ConnectionSpec
Specification for S3Connection - settings of connection to AWS-compatible S3 storage, that are source or target of Kafka S3-connectors. YC Object Storage is AWS-compatible.
bucketName
: string
One of storage
externalS3
: ExternalS3StorageSpec
ThisClusterSpec
ExternalClusterConnectionSpec
bootstrapServers
: string
List of bootstrap servers of the cluster, separated by ,
.
saslUsername
: string
SASL username to use for connection to the cluster.
saslPassword
: string
SASL password to use for connection to the cluster.
saslMechanism
: string
SASL mechanism to use for connection to the cluster.
securityProtocol
: string
Security protocol to use for connection to the cluster.
sslTruststoreCertificates
: string
CA in PEM format to connect to external cluster. Lines of certificate separated by '\n' symbol.
ExternalS3StorageSpec
accessKeyId
: string
secretAccessKey
: string
endpoint
: string
region
: string
Default is 'us-east-1'.
Operation
An Operation resource. For more information, see Operation.
id
: string
ID of the operation.
description
: string
Description of the operation. 0-256 characters long.
createdAt
: google.protobuf.Timestamp
Creation timestamp.
createdBy
: string
ID of the user or service account who initiated the operation.
modifiedAt
: google.protobuf.Timestamp
The time when the Operation resource was last modified.
done
: bool
If the value is false
, it means the operation is still in progress.
If true
, the operation is completed, and either error
or response
is available.
metadata
: google.protobuf.Any
Service-specific metadata associated with the operation. It typically contains the ID of the target resource that the operation is performed on. Any method that returns a long-running operation should document the metadata type, if any.
One of result
The operation result.
If done == false
and there was no failure detected, neither error
nor response
is set.
If done == false
and there was a failure detected, error
is set.
If done == true
, exactly one of error
or response
is set.
error
: google.rpc.Status
The error result of the operation in case of failure or cancellation.
response
: google.protobuf.Any
The normal response of the operation in case of success.
If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is the standard Create/Update, the response should be the target resource of the operation. Any method that returns a long-running operation should document the response type, if any.