Create

import {
  cloudApi,
  serviceClients,
  Session,
  waitForOperation,
} from "@yandex-cloud/nodejs-sdk";

const CleanupPolicy = cloudApi.datatransfer.endpoint_common.CleanupPolicy;
const ClickhouseCleanupPolicy =
  cloudApi.datatransfer.endpoint_clickhouse.ClickhouseCleanupPolicy;
const ColumnType = cloudApi.datatransfer.endpoint_common.ColumnType;
const CreateEndpointRequest =
  cloudApi.datatransfer.endpoint_service.CreateEndpointRequest;
const KafkaMechanism = cloudApi.datatransfer.endpoint_kafka.KafkaMechanism;
const MetrikaStreamType =
  cloudApi.datatransfer.endpoint_metrika.MetrikaStreamType;
const ObjectTransferStage =
  cloudApi.datatransfer.endpoint_common.ObjectTransferStage;
const YdbCleanupPolicy = cloudApi.datatransfer.endpoint_ydb.YdbCleanupPolicy;
const YdbDefaultCompression =
  cloudApi.datatransfer.endpoint_ydb.YdbDefaultCompression;
const YdsCompressionCodec =
  cloudApi.datatransfer.endpoint_yds.YdsCompressionCodec;

(async () => {
  // Authenticate with an OAuth token taken from the environment.
  const authToken = process.env["YC_OAUTH_TOKEN"];
  const session = new Session({ oauthToken: authToken });
  const client = session.client(serviceClients.EndpointServiceClient);

  // Create the endpoint. In `settings`, fill in exactly one source or target
  // variant; the commented fields below list the available options.
  const operation = await client.create(
    CreateEndpointRequest.fromPartial({
      // folderId: "folderId",
      // name: "name",
      // description: "description",
      // labels: {"key": "labels"},
      // settings: {
      //   mysqlSource: {
      //     connection: {
      //       mdbClusterId: "mdbClusterId",
      //       onPremise: {
      //         port: 0,
      //         subnetId: "subnetId",
      //         hosts: ["hosts"],
      //         tlsMode: {
      //           disabled: {
      //           },
      //           enabled: {
      //             caCertificate: "caCertificate"
      //           }
      //         }
      //       }
      //     },
      //     database: "database",
      //     user: "user",
      //     password: {
      //       raw: "raw"
      //     },
      //     timezone: "timezone",
      //     objectTransferSettings: {
      //       view: ObjectTransferStage.BEFORE_DATA,
      //       routine: ObjectTransferStage.BEFORE_DATA,
      //       trigger: ObjectTransferStage.BEFORE_DATA,
      //       tables: ObjectTransferStage.BEFORE_DATA
      //     },
      //     includeTablesRegex: ["includeTablesRegex"],
      //     excludeTablesRegex: ["excludeTablesRegex"],
      //     securityGroups: ["securityGroups"],
      //     serviceDatabase: "serviceDatabase"
      //   },
      //   postgresSource: {
      //     connection: {
      //       mdbClusterId: "mdbClusterId",
      //       onPremise: {
      //         port: 0,
      //         subnetId: "subnetId",
      //         hosts: ["hosts"],
      //         tlsMode: {
      //           disabled: {
      //           },
      //           enabled: {
      //             caCertificate: "caCertificate"
      //           }
      //         }
      //       }
      //     },
      //     database: "database",
      //     user: "user",
      //     password: {
      //       raw: "raw"
      //     },
      //     includeTables: ["includeTables"],
      //     excludeTables: ["excludeTables"],
      //     slotByteLagLimit: 0,
      //     serviceSchema: "serviceSchema",
      //     objectTransferSettings: {
      //       sequence: ObjectTransferStage.BEFORE_DATA,
      //       sequenceOwnedBy: ObjectTransferStage.BEFORE_DATA,
      //       table: ObjectTransferStage.BEFORE_DATA,
      //       primaryKey: ObjectTransferStage.BEFORE_DATA,
      //       fkConstraint: ObjectTransferStage.BEFORE_DATA,
      //       defaultValues: ObjectTransferStage.BEFORE_DATA,
      //       constraint: ObjectTransferStage.BEFORE_DATA,
      //       index: ObjectTransferStage.BEFORE_DATA,
      //       view: ObjectTransferStage.BEFORE_DATA,
      //       function: ObjectTransferStage.BEFORE_DATA,
      //       trigger: ObjectTransferStage.BEFORE_DATA,
      //       type: ObjectTransferStage.BEFORE_DATA,
      //       rule: ObjectTransferStage.BEFORE_DATA,
      //       collation: ObjectTransferStage.BEFORE_DATA,
      //       policy: ObjectTransferStage.BEFORE_DATA,
      //       cast: ObjectTransferStage.BEFORE_DATA,
      //       materializedView: ObjectTransferStage.BEFORE_DATA,
      //       sequenceSet: ObjectTransferStage.BEFORE_DATA
      //     },
      //     securityGroups: ["securityGroups"]
      //   },
      //   ydbSource: {
      //     database: "database",
      //     instance: "instance",
      //     paths: ["paths"],
      //     serviceAccountId: "serviceAccountId",
      //     subnetId: "subnetId",
      //     saKeyContent: "saKeyContent",
      //     securityGroups: ["securityGroups"],
      //     changefeedCustomName: "changefeedCustomName"
      //   },
      //   ydsSource: {
      //     database: "database",
      //     stream: "stream",
      //     serviceAccountId: "serviceAccountId",
      //     supportedCodecs: [YdsCompressionCodec.YDS_COMPRESSION_CODEC_RAW],
      //     parser: {
      //       jsonParser: {
      //         dataSchema: {
      //           jsonFields: "jsonFields",
      //           fields: {
      //             fields: [{
      //               name: "name",
      //               type: ColumnType.INT32,
      //               key: true,
      //               required: true,
      //               path: "path"
      //             }]
      //           }
      //         },
      //         nullKeysAllowed: true,
      //         addRestColumn: true,
      //         unescapeStringValues: true
      //       },
      //       auditTrailsV1Parser: {
      //       },
      //       cloudLoggingParser: {
      //       },
      //       tskvParser: {
      //         dataSchema: {
      //           jsonFields: "jsonFields",
      //           fields: {
      //             fields: [{
      //               name: "name",
      //               type: ColumnType.INT32,
      //               key: true,
      //               required: true,
      //               path: "path"
      //             }]
      //           }
      //         },
      //         nullKeysAllowed: true,
      //         addRestColumn: true,
      //         unescapeStringValues: true
      //       }
      //     },
      //     allowTtlRewind: true,
      //     endpoint: "endpoint",
      //     subnetId: "subnetId",
      //     securityGroups: ["securityGroups"],
      //     consumer: "consumer"
      //   },
      //   kafkaSource: {
      //     connection: {
      //       clusterId: "clusterId",
      //       onPremise: {
      //         brokerUrls: ["brokerUrls"],
      //         subnetId: "subnetId",
      //         tlsMode: {
      //           disabled: {
      //           },
      //           enabled: {
      //             caCertificate: "caCertificate"
      //           }
      //         }
      //       }
      //     },
      //     auth: {
      //       sasl: {
      //         user: "user",
      //         mechanism: KafkaMechanism.KAFKA_MECHANISM_SHA256,
      //         password: {
      //           raw: "raw"
      //         }
      //       },
      //       noAuth: {
      //       }
      //     },
      //     securityGroups: ["securityGroups"],
      //     topicName: "topicName",
      //     transformer: {
      //       cloudFunction: "cloudFunction",
      //       numberOfRetries: 0,
      //       bufferSize: "bufferSize",
      //       bufferFlushInterval: "bufferFlushInterval",
      //       invocationTimeout: "invocationTimeout",
      //       serviceAccountId: "serviceAccountId"
      //     },
      //     parser: {
      //       jsonParser: {
      //         dataSchema: {
      //           jsonFields: "jsonFields",
      //           fields: {
      //             fields: [{
      //               name: "name",
      //               type: ColumnType.INT32,
      //               key: true,
      //               required: true,
      //               path: "path"
      //             }]
      //           }
      //         },
      //         nullKeysAllowed: true,
      //         addRestColumn: true,
      //         unescapeStringValues: true
      //       },
      //       auditTrailsV1Parser: {
      //       },
      //       cloudLoggingParser: {
      //       },
      //       tskvParser: {
      //         dataSchema: {
      //           jsonFields: "jsonFields",
      //           fields: {
      //             fields: [{
      //               name: "name",
      //               type: ColumnType.INT32,
      //               key: true,
      //               required: true,
      //               path: "path"
      //             }]
      //           }
      //         },
      //         nullKeysAllowed: true,
      //         addRestColumn: true,
      //         unescapeStringValues: true
      //       }
      //     },
      //     topicNames: ["topicNames"]
      //   },
      //   mongoSource: {
      //     connection: {
      //       connectionOptions: {
      //         mdbClusterId: "mdbClusterId",
      //         onPremise: {
      //           hosts: ["hosts"],
      //           port: 0,
      //           replicaSet: "replicaSet",
      //           tlsMode: {
      //             disabled: {
      //             },
      //             enabled: {
      //               caCertificate: "caCertificate"
      //             }
      //           }
      //         },
      //         user: "user",
      //         password: {
      //           raw: "raw"
      //         },
      //         authSource: "authSource"
      //       }
      //     },
      //     subnetId: "subnetId",
      //     collections: [{
      //       databaseName: "databaseName",
      //       collectionName: "collectionName"
      //     }],
      //     excludedCollections: [{
      //       databaseName: "databaseName",
      //       collectionName: "collectionName"
      //     }],
      //     secondaryPreferredMode: true,
      //     securityGroups: ["securityGroups"]
      //   },
      //   clickhouseSource: {
      //     connection: {
      //       connectionOptions: {
      //         onPremise: {
      //           shards: [{
      //             name: "name",
      //             hosts: ["hosts"]
      //           }],
      //           httpPort: 0,
      //           nativePort: 0,
      //           tlsMode: {
      //             disabled: {
      //             },
      //             enabled: {
      //               caCertificate: "caCertificate"
      //             }
      //           }
      //         },
      //         mdbClusterId: "mdbClusterId",
      //         user: "user",
      //         password: {
      //           raw: "raw"
      //         },
      //         database: "database"
      //       }
      //     },
      //     includeTables: ["includeTables"],
      //     excludeTables: ["excludeTables"],
      //     subnetId: "subnetId",
      //     securityGroups: ["securityGroups"],
      //     clickhouseClusterName: "clickhouseClusterName"
      //   },
      //   mysqlTarget: {
      //     connection: {
      //       mdbClusterId: "mdbClusterId",
      //       onPremise: {
      //         port: 0,
      //         subnetId: "subnetId",
      //         hosts: ["hosts"],
      //         tlsMode: {
      //           disabled: {
      //           },
      //           enabled: {
      //             caCertificate: "caCertificate"
      //           }
      //         }
      //       }
      //     },
      //     database: "database",
      //     user: "user",
      //     password: {
      //       raw: "raw"
      //     },
      //     sqlMode: "sqlMode",
      //     skipConstraintChecks: true,
      //     timezone: "timezone",
      //     cleanupPolicy: CleanupPolicy.DISABLED,
      //     serviceDatabase: "serviceDatabase",
      //     securityGroups: ["securityGroups"]
      //   },
      //   postgresTarget: {
      //     connection: {
      //       mdbClusterId: "mdbClusterId",
      //       onPremise: {
      //         port: 0,
      //         subnetId: "subnetId",
      //         hosts: ["hosts"],
      //         tlsMode: {
      //           disabled: {
      //           },
      //           enabled: {
      //             caCertificate: "caCertificate"
      //           }
      //         }
      //       }
      //     },
      //     database: "database",
      //     user: "user",
      //     password: {
      //       raw: "raw"
      //     },
      //     cleanupPolicy: CleanupPolicy.DISABLED,
      //     securityGroups: ["securityGroups"]
      //   },
      //   clickhouseTarget: {
      //     connection: {
      //       connectionOptions: {
      //         onPremise: {
      //           shards: [{
      //             name: "name",
      //             hosts: ["hosts"]
      //           }],
      //           httpPort: 0,
      //           nativePort: 0,
      //           tlsMode: {
      //             disabled: {
      //             },
      //             enabled: {
      //               caCertificate: "caCertificate"
      //             }
      //           }
      //         },
      //         mdbClusterId: "mdbClusterId",
      //         user: "user",
      //         password: {
      //           raw: "raw"
      //         },
      //         database: "database"
      //       }
      //     },
      //     subnetId: "subnetId",
      //     altNames: [{
      //       fromName: "fromName",
      //       toName: "toName"
      //     }],
      //     cleanupPolicy: ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_DISABLED,
      //     sharding: {
      //       columnValueHash: {
      //         columnName: "columnName"
      //       },
      //       customMapping: {
      //         columnName: "columnName",
      //         mapping: [{
      //           columnValue: {
      //             stringValue: "stringValue"
      //           },
      //           shardName: "shardName"
      //         }]
      //       },
      //       transferId: {
      //       },
      //       roundRobin: {
      //       }
      //     },
      //     clickhouseClusterName: "clickhouseClusterName",
      //     securityGroups: ["securityGroups"]
      //   },
      //   ydbTarget: {
      //     database: "database",
      //     instance: "instance",
      //     path: "path",
      //     serviceAccountId: "serviceAccountId",
      //     cleanupPolicy: YdbCleanupPolicy.YDB_CLEANUP_POLICY_DISABLED,
      //     subnetId: "subnetId",
      //     saKeyContent: "saKeyContent",
      //     securityGroups: ["securityGroups"],
      //     isTableColumnOriented: true,
      //     defaultCompression: YdbDefaultCompression.YDB_DEFAULT_COMPRESSION_DISABLED
      //   },
      //   kafkaTarget: {
      //     connection: {
      //       clusterId: "clusterId",
      //       onPremise: {
      //         brokerUrls: ["brokerUrls"],
      //         subnetId: "subnetId",
      //         tlsMode: {
      //           disabled: {
      //           },
      //           enabled: {
      //             caCertificate: "caCertificate"
      //           }
      //         }
      //       }
      //     },
      //     auth: {
      //       sasl: {
      //         user: "user",
      //         mechanism: KafkaMechanism.KAFKA_MECHANISM_SHA256,
      //         password: {
      //           raw: "raw"
      //         }
      //       },
      //       noAuth: {
      //       }
      //     },
      //     securityGroups: ["securityGroups"],
      //     topicSettings: {
      //       topic: {
      //         topicName: "topicName",
      //         saveTxOrder: true
      //       },
      //       topicPrefix: "topicPrefix"
      //     },
      //     serializer: {
      //       serializerAuto: {
      //       },
      //       serializerJson: {
      //       },
      //       serializerDebezium: {
      //         serializerParameters: [{
      //           key: "key",
      //           value: "value"
      //         }]
      //       }
      //     }
      //   },
      //   mongoTarget: {
      //     connection: {
      //       connectionOptions: {
      //         mdbClusterId: "mdbClusterId",
      //         onPremise: {
      //           hosts: ["hosts"],
      //           port: 0,
      //           replicaSet: "replicaSet",
      //           tlsMode: {
      //             disabled: {
      //             },
      //             enabled: {
      //               caCertificate: "caCertificate"
      //             }
      //           }
      //         },
      //         user: "user",
      //         password: {
      //           raw: "raw"
      //         },
      //         authSource: "authSource"
      //       }
      //     },
      //     database: "database",
      //     cleanupPolicy: CleanupPolicy.DISABLED,
      //     subnetId: "subnetId",
      //     securityGroups: ["securityGroups"]
      //   },
      //   metrikaSource: {
      //     counterIds: [0],
      //     token: {
      //       raw: "raw"
      //     },
      //     streams: [{
      //       type: MetrikaStreamType.METRIKA_STREAM_TYPE_HITS,
      //       columns: ["columns"]
      //     }]
      //   },
      //   ydsTarget: {
      //     database: "database",
      //     stream: "stream",
      //     serviceAccountId: "serviceAccountId",
      //     saveTxOrder: true,
      //     serializer: {
      //       serializerAuto: {
      //       },
      //       serializerJson: {
      //       },
      //       serializerDebezium: {
      //         serializerParameters: [{
      //           key: "key",
      //           value: "value"
      //         }]
      //       }
      //     },
      //     endpoint: "endpoint",
      //     subnetId: "subnetId",
      //     securityGroups: ["securityGroups"]
      //   }
      // }
    })
  );
  await waitForOperation(operation, session);
})();

CreateEndpointRequest

folderId : string
name : string
description : string
labels : string
settings : EndpointSettings

EndpointSettings

One of settings

  • mysqlSource : endpoint.MysqlSource
  • postgresSource : endpoint.PostgresSource
  • ydbSource : endpoint.YdbSource
  • ydsSource : endpoint.YDSSource
  • kafkaSource : endpoint.KafkaSource
  • mongoSource : endpoint.MongoSource
  • clickhouseSource : endpoint.ClickhouseSource
  • mysqlTarget : endpoint.MysqlTarget
  • postgresTarget : endpoint.PostgresTarget
  • clickhouseTarget : endpoint.ClickhouseTarget
  • ydbTarget : endpoint.YdbTarget
  • kafkaTarget : endpoint.KafkaTarget
  • mongoTarget : endpoint.MongoTarget
  • metrikaSource : endpoint.MetrikaSource
  • ydsTarget : endpoint.YDSTarget
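
For example, a minimal request sketch that fills exactly one variant (here, postgresSource). It reuses the imports from the sample above; the IDs and credentials are placeholders:

const request = CreateEndpointRequest.fromPartial({
  folderId: "<folder ID>",
  name: "pg-source",
  settings: {
    // Exactly one source or target variant may be set.
    postgresSource: {
      connection: { mdbClusterId: "<cluster ID>" },
      database: "db1",
      user: "transfer_user",
      password: { raw: "<password>" },
    },
  },
});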

MysqlSource

connection : MysqlConnection

Database connection settings

database : string

Database name

You can leave it empty; in that case, tables from several databases on this source can be transferred at the same time.

user : string

User for database access.

password : Secret

Password for database access.

timezone : string

Database timezone

Used when parsing timestamps to preserve source timezones. Accepts values from the IANA timezone database. Default: local timezone.

objectTransferSettings : MysqlObjectTransferSettings

Schema migration

Select database objects to be transferred during activation or deactivation.

includeTablesRegex : string
excludeTablesRegex : string
securityGroups : string

Security groups

serviceDatabase : string

Database for service tables

Default: the data source database. Technical tables (tm_keeper, tm_gtid_keeper) are created here.
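
A sketch of mysqlSource settings with placeholder values. The regex format in includeTablesRegex is illustrative, and the AFTER_DATA stage is an assumption about the ObjectTransferStage enum (the sample above only shows BEFORE_DATA):

settings: {
  mysqlSource: {
    connection: { mdbClusterId: "<cluster ID>" },
    database: "db1",
    user: "transfer_user",
    password: { raw: "<password>" },
    objectTransferSettings: {
      tables: ObjectTransferStage.BEFORE_DATA,
      // Assumed enum member: create views only after the data is copied.
      view: ObjectTransferStage.AFTER_DATA,
    },
    includeTablesRegex: ["^db1\\.orders_.*"],
  },
}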

PostgresSource

connection : PostgresConnection

Database connection settings

database : string

Database name

user : string

User for database access. Not required, as it may be specified in the connection settings.

password : Secret

Password for database access.

includeTables : string

Included tables

If the list is omitted or empty, all tables are replicated. Use the full table name with the schema. May contain schema_name.* patterns.

excludeTables : string

Excluded tables

If the list is omitted or empty, all tables are replicated. Use the full table name with the schema. May contain schema_name.* patterns.

slotByteLagLimit : int64

Maximum lag of the replication slot (in bytes); once this limit is exceeded, replication is aborted.

serviceSchema : string

Database schema for service tables (consumer_keeper, data_transfer_mole_finder). Default: public.

objectTransferSettings : PostgresObjectTransferSettings

Select database objects to be transferred during activation or deactivation.

securityGroups : string

Security groups
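
A sketch of postgresSource settings with placeholder values, showing the schema_name.* include pattern and a slot lag limit:

settings: {
  postgresSource: {
    connection: { mdbClusterId: "<cluster ID>" },
    database: "db1",
    user: "transfer_user",
    password: { raw: "<password>" },
    // Full table names with schema; schema_name.* patterns are allowed.
    includeTables: ["public.orders", "analytics.*"],
    slotByteLagLimit: 1073741824, // abort replication past ~1 GiB of slot lag
  },
}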

YdbSource

database : string

Path in YDB where tables are stored.

instance : string

Instance of YDB. Example: ydb-ru-prestable.yandex.net:2135.

paths : string
serviceAccountId : string
subnetId : string

Network interface for the endpoint. If not specified, a public IPv4 address is assumed.

saKeyContent : string

Authorization Key

securityGroups : string

Security groups

changefeedCustomName : string

Pre-created change feed

YDSSource

database : string

Database

stream : string

Stream

serviceAccountId : string

Service account with read access to the stream.

supportedCodecs : YdsCompressionCodec

Compression codec

parser : Parser

Data parsing rules

allowTtlRewind : bool

Whether to continue working if the consumer read lag exceeds the topic's TTL. False: stop the transfer in an error state if lost data is detected. True: continue working, losing part of the data.

endpoint : string

For a dedicated database.

subnetId : string

Network interface for the endpoint. If not specified, a public IPv4 address is assumed.

securityGroups : string

Security groups

consumer : string

For important streams.
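
A sketch of ydsSource settings with a JSON parser and placeholder values; ColumnType.STRING is assumed to exist alongside the INT32 member shown in the sample above:

settings: {
  ydsSource: {
    database: "<database>",
    stream: "<stream name>",
    serviceAccountId: "<service account ID>",
    parser: {
      jsonParser: {
        dataSchema: {
          fields: {
            fields: [
              { name: "id", type: ColumnType.INT32, key: true, required: true },
              { name: "payload", type: ColumnType.STRING }, // assumed member
            ],
          },
        },
        addRestColumn: true, // unknown fields go into a _rest column
      },
    },
  },
}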

KafkaSource

connection : KafkaConnectionOptions

Connection settings

auth : KafkaAuth

Authentication settings

securityGroups : string

Security groups

topicName : string

Full source topic name. Deprecated in favor of topicNames.

transformer : DataTransformationOptions

Data transformation rules

parser : Parser

Data parsing rules

topicNames : string

List of topic names to read
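
A sketch of kafkaSource settings for an on-premise broker with SASL authentication (placeholder values), using topicNames rather than the deprecated topicName:

settings: {
  kafkaSource: {
    connection: {
      onPremise: {
        brokerUrls: ["broker-1:9091", "broker-2:9091"],
        tlsMode: { enabled: { caCertificate: "<PEM certificate>" } },
      },
    },
    auth: {
      sasl: {
        user: "transfer_user",
        mechanism: KafkaMechanism.KAFKA_MECHANISM_SHA256,
        password: { raw: "<password>" },
      },
    },
    topicNames: ["orders", "payments"],
  },
}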

MongoSource

connection : MongoConnection
subnetId : string
collections : MongoCollection

List of collections for replication. An empty list implies replication of all collections in the deployment. * is allowed as a collection name.

excludedCollections : MongoCollection

List of collections excluded from replication. * is allowed as a collection name to exclude all collections of a specific database.

secondaryPreferredMode : bool

Read mode for mongo client

securityGroups : string

Security groups
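
A sketch of mongoSource settings with placeholder values; "*" selects every collection of the named database:

settings: {
  mongoSource: {
    connection: {
      connectionOptions: {
        mdbClusterId: "<cluster ID>",
        user: "transfer_user",
        password: { raw: "<password>" },
        authSource: "admin",
      },
    },
    collections: [{ databaseName: "shop", collectionName: "*" }],
    excludedCollections: [{ databaseName: "shop", collectionName: "audit_log" }],
  },
}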

ClickhouseSource

connection : ClickhouseConnection
includeTables : string

Include list of tables for replication. If the list is omitted or empty, all tables are replicated. May contain * patterns.

excludeTables : string

Exclude list of tables for replication. If the list is omitted or empty, all tables are replicated. May contain * patterns.

subnetId : string
securityGroups : string
clickhouseClusterName : string

Name of the ClickHouse cluster. For Managed ClickHouse, this is the name of the ShardGroup.

MysqlTarget

connection : MysqlConnection

Database connection settings

database : string

Database name

You can leave it empty; tables will then be created in databases with the same names as on the source. If this field is empty, you must fill in the service database field below.

user : string

User for database access.

password : Secret

Password for database access.

sqlMode : string

Default: NO_AUTO_VALUE_ON_ZERO,NO_DIR_IN_CREATE,NO_ENGINE_SUBSTITUTION.

skipConstraintChecks : bool

Disable constraints checks

Disabling the checks is recommended to increase replication speed, but not if the schema contains cascading operations. This option sets FOREIGN_KEY_CHECKS=0 and UNIQUE_CHECKS=0.

timezone : string

Database timezone

Used when parsing timestamps to preserve source timezones. Accepts values from the IANA timezone database. Default: local timezone.

cleanupPolicy : CleanupPolicy

Cleanup policy

Cleanup policy for activate, reactivate and reupload processes. Default is DISABLED.

serviceDatabase : string

Database schema for service table

Default: the database name. Technical tables (tm_keeper, tm_gtid_keeper) are created here.

securityGroups : string

Security groups
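
A sketch of mysqlTarget settings with placeholder values:

settings: {
  mysqlTarget: {
    connection: { mdbClusterId: "<cluster ID>" },
    database: "db1",
    user: "transfer_user",
    password: { raw: "<password>" },
    // Sets FOREIGN_KEY_CHECKS=0 and UNIQUE_CHECKS=0 to speed up replication.
    skipConstraintChecks: true,
    cleanupPolicy: CleanupPolicy.DISABLED,
  },
}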

PostgresTarget

connection : PostgresConnection

Database connection settings

database : string

Database name

user : string

User for database access. Not required, as it may be specified in the connection settings.

password : Secret

Password for database access.

cleanupPolicy : CleanupPolicy

Cleanup policy for activate, reactivate and reupload processes. Default is truncate.

securityGroups : string

Security groups

ClickhouseTarget

connection : ClickhouseConnection
subnetId : string
altNames : AltName

Alternative table names in target

cleanupPolicy : ClickhouseCleanupPolicy
sharding : ClickhouseSharding
clickhouseClusterName : string

Name of the ClickHouse cluster. For Managed ClickHouse, this is the name of the ShardGroup.

securityGroups : string

YdbTarget

database : string

Path in YDB where tables are stored.

instance : string

Instance of YDB. Example: ydb-ru-prestable.yandex.net:2135.

path : string

Path extension for the database; each table will be laid out under this path.

serviceAccountId : string
cleanupPolicy : YdbCleanupPolicy

Cleanup policy

subnetId : string

Network interface for the endpoint. If not specified, a public IPv4 address is assumed.

saKeyContent : string

Service account key content.

securityGroups : string

Security groups

isTableColumnOriented : bool

Whether to create column-oriented (OLAP) tables. By default, row-oriented (OLTP) tables are created.

defaultCompression : YdbDefaultCompression

Compression used for the default column family when creating YDB tables.

KafkaTarget

connection : KafkaConnectionOptions

Connection settings

auth : KafkaAuth

Authentication settings

securityGroups : string

Security groups

topicSettings : KafkaTargetTopicSettings

Target topic settings

serializer : Serializer

Data serialization format settings
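
A sketch of kafkaTarget settings with placeholder values; with topicPrefix, each table gets its own topic named <topic_prefix>.<schema>.<table_name>:

settings: {
  kafkaTarget: {
    connection: { clusterId: "<cluster ID>" },
    auth: { noAuth: {} },
    topicSettings: {
      topicPrefix: "cdc",
    },
    serializer: {
      serializerDebezium: {
        serializerParameters: [{ key: "<parameter>", value: "<value>" }],
      },
    },
  },
}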

MongoTarget

connection : MongoConnection
database : string

Database name

cleanupPolicy : CleanupPolicy
subnetId : string
securityGroups : string

Security groups

MetrikaSource

counterIds : int64
token : Secret
streams : MetrikaStream

YDSTarget

database : string

Database

stream : string

Stream

serviceAccountId : string

Service account with read access to the stream.

saveTxOrder : bool

Save transaction order: do not split the event queue into separate per-table queues. Incompatible with the Topic prefix setting; works only with a full topic name.

serializer : Serializer

Data serialization format

endpoint : string

For a dedicated database.

subnetId : string

Network interface for the endpoint. If not specified, a public IPv4 address is assumed.

securityGroups : string

Security groups

MysqlConnection

One of connection

  • mdbClusterId : string

    Managed Service for MySQL cluster ID

  • onPremise : OnPremiseMysql

    Connection options for on-premise MySQL

Secret

One of value

  • raw : string

    Raw secret value

MysqlObjectTransferSettings

view : ObjectTransferStage

Views

CREATE VIEW ...

routine : ObjectTransferStage

Routines

CREATE PROCEDURE ... ; CREATE FUNCTION ... ;

trigger : ObjectTransferStage

Triggers

CREATE TRIGGER ...

tables : ObjectTransferStage

PostgresConnection

One of connection

  • mdbClusterId : string

    Managed Service for PostgreSQL cluster ID

  • onPremise : OnPremisePostgres

    Connection options for on-premise PostgreSQL

PostgresObjectTransferSettings

sequence : ObjectTransferStage

Sequences

CREATE SEQUENCE ...

sequenceOwnedBy : ObjectTransferStage

Owned sequences

CREATE SEQUENCE ... OWNED BY ...

table : ObjectTransferStage

Tables

CREATE TABLE ...

primaryKey : ObjectTransferStage

Primary keys

ALTER TABLE ... ADD PRIMARY KEY ...

fkConstraint : ObjectTransferStage

Foreign keys

ALTER TABLE ... ADD FOREIGN KEY ...

defaultValues : ObjectTransferStage

Default values

ALTER TABLE ... ALTER COLUMN ... SET DEFAULT ...

constraint : ObjectTransferStage

Constraints

ALTER TABLE ... ADD CONSTRAINT ...

index : ObjectTransferStage

Indexes

CREATE INDEX ...

view : ObjectTransferStage

Views

CREATE VIEW ...

function : ObjectTransferStage

Functions

CREATE FUNCTION ...

trigger : ObjectTransferStage

Triggers

CREATE TRIGGER ...

type : ObjectTransferStage

Types

CREATE TYPE ...

rule : ObjectTransferStage

Rules

CREATE RULE ...

collation : ObjectTransferStage

Collations

CREATE COLLATION ...

policy : ObjectTransferStage

Policies

CREATE POLICY ...

cast : ObjectTransferStage

Casts

CREATE CAST ...

materializedView : ObjectTransferStage

Materialized views

CREATE MATERIALIZED VIEW ...

sequenceSet : ObjectTransferStage

Parser

One of parser

  • jsonParser : GenericParserCommon
  • auditTrailsV1Parser : AuditTrailsV1Parser
  • cloudLoggingParser : CloudLoggingParser
  • tskvParser : GenericParserCommon

KafkaConnectionOptions

One of connection

  • clusterId : string

    Managed Service for Kafka cluster ID

  • onPremise : OnPremiseKafka

    Connection options for on-premise Kafka

KafkaAuth

One of security

  • sasl : KafkaSaslSecurity

    Authentication with SASL

  • noAuth : NoAuth

    No authentication

DataTransformationOptions

cloudFunction : string

Cloud function

numberOfRetries : int64

Number of retries

bufferSize : string

Buffer size for function

bufferFlushInterval : string

Flush interval

invocationTimeout : string

Invocation timeout

serviceAccountId : string

Service account

MongoConnection

One of connection

  • connectionOptions : MongoConnectionOptions

MongoCollection

databaseName : string
collectionName : string

ClickhouseConnection

One of connection

  • connectionOptions : ClickhouseConnectionOptions

AltName

fromName : string

Source table name

toName : string

Target table name

ClickhouseSharding

One of sharding

  • columnValueHash : ColumnValueHash
  • customMapping : ColumnValueMapping
  • transferId : google.protobuf.Empty
  • roundRobin : google.protobuf.Empty
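
For example, a customMapping sketch that routes rows to shards by a column value (all names are placeholders); columnValueHash, transferId, or roundRobin could be used instead:

sharding: {
  customMapping: {
    columnName: "region",
    mapping: [
      { columnValue: { stringValue: "eu" }, shardName: "shard1" },
      { columnValue: { stringValue: "us" }, shardName: "shard2" },
    ],
  },
}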

KafkaTargetTopicSettings

One of topicSettings

  • topic : KafkaTargetTopic

    Full topic name

  • topicPrefix : string

    Topic prefix

Analogue of the Debezium database.server.name setting. Messages will be sent to a topic named <topic_prefix>.<schema>.<table_name>.

Serializer

Data serialization format

One of serializer

  • serializerAuto : SerializerAuto

    Select the serialization format automatically

  • serializerJson : SerializerJSON

    Serialize data in json format

  • serializerDebezium : SerializerDebezium

    Serialize data in debezium format

MetrikaStream

type : MetrikaStreamType
columns : string

OnPremiseMysql

port : int64

Database port

subnetId : string

Network interface for the endpoint. If not specified, a public IPv4 address is assumed.

hosts : string
tlsMode : TLSMode

TLS settings for server connection. Disabled by default.

OnPremisePostgres

port : int64

Will be used if the cluster ID is not specified.

subnetId : string

Network interface for the endpoint. If not specified, a public IPv4 address is assumed.

hosts : string
tlsMode : TLSMode

TLS settings for server connection. Disabled by default.

GenericParserCommon

dataSchema : DataSchema
nullKeysAllowed : bool

Allow null keys; if false, records with null keys are put into unparsed data.

addRestColumn : bool

Adds a _rest column for all unknown fields.

unescapeStringValues : bool

Unescape string values

AuditTrailsV1Parser

CloudLoggingParser

OnPremiseKafka

brokerUrls : string

Kafka broker URLs

subnetId : string

Network interface for the endpoint. If not specified, a public IPv4 address is assumed.

tlsMode : TLSMode

TLS settings for broker connection. Disabled by default.

KafkaSaslSecurity

user : string

User name

mechanism : KafkaMechanism

SASL mechanism for authentication

password : Secret

Password for user

NoAuth

No authentication

MongoConnectionOptions

One of address

  • mdbClusterId : string
  • onPremise : OnPremiseMongo
user : string

User name

password : Secret

Password for user

authSource : string

Database name associated with the credentials

ClickhouseConnectionOptions

One of address

  • onPremise : OnPremiseClickhouse
  • mdbClusterId : string
user : string
password : Secret
database : string

Database

ColumnValue

One of value

  • stringValue : string

ValueToShard

columnValue : ColumnValue
shardName : string

ColumnValueHash

columnName : string

ColumnValueMapping

columnName : string
mapping : ValueToShard

KafkaTargetTopic

topicName : string

Topic name

saveTxOrder : bool

Save transaction order: do not split the event queue into separate per-table queues.

SerializerAuto

SerializerJSON

SerializerDebezium

serializerParameters : DebeziumSerializerParameter

Serialization parameters as key-value pairs.

TLSMode

One of tlsMode

  • disabled : google.protobuf.Empty
  • enabled : TLSConfig
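
For example, enabling TLS with a CA certificate loaded from a local PEM file (the file path is a placeholder; reading it with Node's fs module is an assumption about your setup):

import { readFileSync } from "fs";

const tlsMode = {
  enabled: { caCertificate: readFileSync("/path/to/ca.pem", "utf8") },
};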

DataSchema

One of schema

  • jsonFields : string
  • fields : FieldList

OnPremiseMongo

hosts : string
port : int64
replicaSet : string
tlsMode : TLSMode

OnPremiseClickhouse

shards : ClickhouseShard
httpPort : int64
nativePort : int64
tlsMode : TLSMode

DebeziumSerializerParameter

key : string

Name of the serializer parameter

value : string

Value of the serializer parameter

TLSConfig

caCertificate : string

CA certificate

X.509 certificate of the certificate authority that issued the server's certificate, in PEM format. When a CA certificate is specified, TLS is used to connect to the server.

FieldList

fields : ColSchema

Column schema

ClickhouseShard

name : string
hosts : string

ColSchema

name : string
type : ColumnType
key : bool
required : bool
path : string

Operation

An Operation resource. For more information, see Operation.

id : string

ID of the operation.

description : string

Description of the operation. 0-256 characters long.

createdAt : google.protobuf.Timestamp

Creation timestamp.

createdBy : string

ID of the user or service account who initiated the operation.

modifiedAt : google.protobuf.Timestamp

The time when the Operation resource was last modified.

done : bool

If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.

metadata : google.protobuf.Any

Service-specific metadata associated with the operation. It typically contains the ID of the target resource that the operation is performed on. Any method that returns a long-running operation should document the metadata type, if any.

One of result

The operation result. If done == false and there was no failure detected, neither error nor response is set. If done == false and there was a failure detected, error is set. If done == true, exactly one of error or response is set.

  • error : google.rpc.Status

    The error result of the operation in case of failure or cancellation.

  • response : google.protobuf.Any
    The normal response of the operation in case of success.

    If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is the standard Create/Update, the response should be the target resource of the operation. Any method that returns a long-running operation should document the response type, if any.
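
A sketch of handling the result oneof, assuming waitForOperation from the sample above resolves to the completed Operation:

const finished = await waitForOperation(operation, session);
if (finished.error) {
  // google.rpc.Status carries a code and a human-readable message.
  throw new Error(`endpoint creation failed: ${finished.error.message}`);
}
// On success, `response` is a google.protobuf.Any wrapping the created Endpoint.
console.log(finished.response?.typeUrl);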