Update
Update PXF datasource
- TypeScript
- Python
import {
  cloudApi,
  decodeMessage,
  serviceClients,
  Session,
  waitForOperation,
} from "@yandex-cloud/nodejs-sdk";

const PXFDatasource = cloudApi.mdb.greenplum_pxf.PXFDatasource;
const UpdatePXFDatasourceRequest =
  cloudApi.mdb.greenplum_pxf_service.UpdatePXFDatasourceRequest;

// Updates a PXF datasource in a Managed Service for Greenplum(R) cluster,
// waits for the long-running operation to complete, then decodes and prints
// the resulting PXFDatasource. Authenticates with the OAuth token read from
// the YC_OAUTH_TOKEN environment variable.
(async () => {
  const authToken = process.env["YC_OAUTH_TOKEN"];
  const session = new Session({ oauthToken: authToken });
  const client = session.client(serviceClients.PXFDatasourceServiceClient);

  const operation = await client.update(
    UpdatePXFDatasourceRequest.fromPartial({
      clusterId: "clusterId",
      // updateMask lists the field paths to change; unlisted fields keep
      // their current values.
      // updateMask: {
      //   paths: ["paths"]
      // },
      // datasource: {
      //   name: "name",
      //   s3: {
      //     accessKey: "accessKey",
      //     secretKey: "secretKey",
      //     fastUpload: {
      //       value: true
      //     },
      //     endpoint: "endpoint"
      //   },
      //   jdbc: {
      //     driver: "driver",
      //     url: "url",
      //     user: "user",
      //     password: "password",
      //     statementBatchSize: {
      //       value: 0
      //     },
      //     statementFetchSize: {
      //       value: 0
      //     },
      //     statementQueryTimeout: {
      //       value: 0
      //     },
      //     poolEnabled: {
      //       value: true
      //     },
      //     poolMaximumSize: {
      //       value: 0
      //     },
      //     poolConnectionTimeout: {
      //       value: 0
      //     },
      //     poolIdleTimeout: {
      //       value: 0
      //     },
      //     poolMinimumIdle: {
      //       value: 0
      //     }
      //   },
      //   hdfs: {
      //     core: {
      //       defaultFs: "defaultFs",
      //       securityAuthToLocal: "securityAuthToLocal"
      //     },
      //     kerberos: {
      //       enable: {
      //         value: true
      //       },
      //       primary: "primary",
      //       realm: "realm",
      //       kdcServers: ["kdcServers"],
      //       adminServer: "adminServer",
      //       defaultDomain: "defaultDomain",
      //       keytabBase64: "keytabBase64"
      //     },
      //     userImpersonation: {
      //       value: true
      //     },
      //     username: "username",
      //     saslConnectionRetries: {
      //       value: 0
      //     },
      //     zkHosts: ["zkHosts"],
      //     dfs: {
      //       haAutomaticFailoverEnabled: {
      //         value: true
      //       },
      //       blockAccessTokenEnabled: {
      //         value: true
      //       },
      //       useDatanodeHostname: {
      //         value: true
      //       },
      //       // namenodes is a map keyed by the namenode logical name.
      //       namenodes: {"<namenode-name>": {
      //         rpcAddress: "rpcAddress",
      //         serviceRpcAddress: "serviceRpcAddress",
      //         httpAddress: "httpAddress",
      //         httpsAddress: "httpsAddress"
      //       }},
      //       nameservices: "nameservices"
      //     },
      //     yarn: {
      //       resourcemanagerHaEnabled: {
      //         value: true
      //       },
      //       resourcemanagerHaAutoFailoverEnabled: {
      //         value: true
      //       },
      //       resourcemanagerHaAutoFailoverEmbedded: {
      //         value: true
      //       },
      //       resourcemanagerClusterId: "resourcemanagerClusterId",
      //       // haRm is a map keyed by the ResourceManager ID.
      //       haRm: {"<resourcemanager-id>": {
      //         resourcemanagerAddress: "resourcemanagerAddress",
      //         resourcemanagerSchedulerAddress: "resourcemanagerSchedulerAddress",
      //         resourcemanagerResourceTrackerAddress: "resourcemanagerResourceTrackerAddress",
      //         resourcemanagerAdminAddress: "resourcemanagerAdminAddress",
      //         resourcemanagerWebappAddress: "resourcemanagerWebappAddress",
      //         resourcemanagerWebappHttpsAddress: "resourcemanagerWebappHttpsAddress"
      //       }}
      //     }
      //   },
      //   hive: {
      //     core: {
      //       defaultFs: "defaultFs",
      //       securityAuthToLocal: "securityAuthToLocal"
      //     },
      //     kerberos: {
      //       enable: {
      //         value: true
      //       },
      //       primary: "primary",
      //       realm: "realm",
      //       kdcServers: ["kdcServers"],
      //       adminServer: "adminServer",
      //       defaultDomain: "defaultDomain",
      //       keytabBase64: "keytabBase64"
      //     },
      //     userImpersonation: {
      //       value: true
      //     },
      //     username: "username",
      //     saslConnectionRetries: {
      //       value: 0
      //     },
      //     zkHosts: ["zkHosts"],
      //     ppd: {
      //       value: true
      //     },
      //     metastoreUris: ["metastoreUris"],
      //     metastoreKerberosPrincipal: "metastoreKerberosPrincipal",
      //     authKerberosPrincipal: "authKerberosPrincipal"
      //   }
      // }
    })
  );

  // Block until the operation finishes, then decode the Any-typed response.
  const finishedOp = await waitForOperation(operation, session);
  if (finishedOp.response) {
    const result = decodeMessage<typeof PXFDatasource>(finishedOp.response);
    console.log(result);
  }
})().catch((err) => {
  // Surface failures instead of leaving an unhandled promise rejection.
  console.error(err);
  process.exitCode = 1;
});
# Updates a PXF datasource in a Managed Service for Greenplum(R) cluster,
# waits for the long-running operation and prints the resulting PXFDatasource.
# Authenticates with the OAuth token read from the YC_OAUTH_TOKEN variable.
import os

import grpc
import yandexcloud

from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasource
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceCore
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceHDFS
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceHDFSDfs
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceHDFSDfsNamenode
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceHDFSYarn
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceHDFSYarnHaRm
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceHive
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceJDBC
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceKerberos
from yandex.cloud.mdb.greenplum.v1.pxf_pb2 import PXFDatasourceS3
from yandex.cloud.mdb.greenplum.v1.pxf_service_pb2_grpc import PXFDatasourceServiceStub
from yandex.cloud.mdb.greenplum.v1.pxf_service_pb2 import UpdatePXFDatasourceMetadata
from yandex.cloud.mdb.greenplum.v1.pxf_service_pb2 import UpdatePXFDatasourceRequest

# Uncommenting the wrapper-typed fields below additionally requires:
# from google.protobuf.field_mask_pb2 import FieldMask
# from google.protobuf.wrappers_pb2 import BoolValue, Int64Value

token = os.getenv("YC_OAUTH_TOKEN")
sdk = yandexcloud.SDK(token=token)

service = sdk.client(PXFDatasourceServiceStub)
operation = service.Update(
    UpdatePXFDatasourceRequest(
        cluster_id="clusterId",
        # update_mask lists the field paths to change; unlisted fields keep
        # their current values.
        # update_mask = FieldMask.FromJsonString("field1,field2"),
        # datasource = PXFDatasource(
        #     name = "name",
        #     s_3 = PXFDatasourceS3(
        #         access_key = "accessKey",
        #         secret_key = "secretKey",
        #         fast_upload = BoolValue(
        #             value = True
        #         ),
        #         endpoint = "endpoint"
        #     ),
        #     jdbc = PXFDatasourceJDBC(
        #         driver = "driver",
        #         url = "url",
        #         user = "user",
        #         password = "password",
        #         statement_batch_size = Int64Value(
        #             value = 0
        #         ),
        #         statement_fetch_size = Int64Value(
        #             value = 0
        #         ),
        #         statement_query_timeout = Int64Value(
        #             value = 0
        #         ),
        #         pool_enabled = BoolValue(
        #             value = True
        #         ),
        #         pool_maximum_size = Int64Value(
        #             value = 0
        #         ),
        #         pool_connection_timeout = Int64Value(
        #             value = 0
        #         ),
        #         pool_idle_timeout = Int64Value(
        #             value = 0
        #         ),
        #         pool_minimum_idle = Int64Value(
        #             value = 0
        #         )
        #     ),
        #     hdfs = PXFDatasourceHDFS(
        #         core = PXFDatasourceCore(
        #             default_fs = "defaultFs",
        #             security_auth_to_local = "securityAuthToLocal"
        #         ),
        #         kerberos = PXFDatasourceKerberos(
        #             enable = BoolValue(
        #                 value = True
        #             ),
        #             primary = "primary",
        #             realm = "realm",
        #             kdc_servers = ["kdcServers"],
        #             admin_server = "adminServer",
        #             default_domain = "defaultDomain",
        #             keytab_base_64 = "keytabBase64"
        #         ),
        #         user_impersonation = BoolValue(
        #             value = True
        #         ),
        #         username = "username",
        #         sasl_connection_retries = Int64Value(
        #             value = 0
        #         ),
        #         zk_hosts = ["zkHosts"],
        #         dfs = PXFDatasourceHDFSDfs(
        #             ha_automatic_failover_enabled = BoolValue(
        #                 value = True
        #             ),
        #             block_access_token_enabled = BoolValue(
        #                 value = True
        #             ),
        #             use_datanode_hostname = BoolValue(
        #                 value = True
        #             ),
        #             # namenodes is a map keyed by the namenode logical name.
        #             namenodes = {"<namenode-name>": PXFDatasourceHDFSDfsNamenode(
        #                 rpc_address = "rpcAddress",
        #                 service_rpc_address = "serviceRpcAddress",
        #                 http_address = "httpAddress",
        #                 https_address = "httpsAddress"
        #             )},
        #             nameservices = "nameservices"
        #         ),
        #         yarn = PXFDatasourceHDFSYarn(
        #             resourcemanager_ha_enabled = BoolValue(
        #                 value = True
        #             ),
        #             resourcemanager_ha_auto_failover_enabled = BoolValue(
        #                 value = True
        #             ),
        #             resourcemanager_ha_auto_failover_embedded = BoolValue(
        #                 value = True
        #             ),
        #             resourcemanager_cluster_id = "resourcemanagerClusterId",
        #             # ha_rm is a map keyed by the ResourceManager ID.
        #             ha_rm = {"<resourcemanager-id>": PXFDatasourceHDFSYarnHaRm(
        #                 resourcemanager_address = "resourcemanagerAddress",
        #                 resourcemanager_scheduler_address = "resourcemanagerSchedulerAddress",
        #                 resourcemanager_resource_tracker_address = "resourcemanagerResourceTrackerAddress",
        #                 resourcemanager_admin_address = "resourcemanagerAdminAddress",
        #                 resourcemanager_webapp_address = "resourcemanagerWebappAddress",
        #                 resourcemanager_webapp_https_address = "resourcemanagerWebappHttpsAddress"
        #             )}
        #         )
        #     ),
        #     hive = PXFDatasourceHive(
        #         core = PXFDatasourceCore(
        #             default_fs = "defaultFs",
        #             security_auth_to_local = "securityAuthToLocal"
        #         ),
        #         kerberos = PXFDatasourceKerberos(
        #             enable = BoolValue(
        #                 value = True
        #             ),
        #             primary = "primary",
        #             realm = "realm",
        #             kdc_servers = ["kdcServers"],
        #             admin_server = "adminServer",
        #             default_domain = "defaultDomain",
        #             keytab_base_64 = "keytabBase64"
        #         ),
        #         user_impersonation = BoolValue(
        #             value = True
        #         ),
        #         username = "username",
        #         sasl_connection_retries = Int64Value(
        #             value = 0
        #         ),
        #         zk_hosts = ["zkHosts"],
        #         ppd = BoolValue(
        #             value = True
        #         ),
        #         metastore_uris = ["metastoreUris"],
        #         metastore_kerberos_principal = "metastoreKerberosPrincipal",
        #         auth_kerberos_principal = "authKerberosPrincipal"
        #     )
        # )
    )
)

# Block until the operation finishes and unpack the typed response/metadata.
operation_result = sdk.wait_operation_and_get_result(
    operation,
    response_type=PXFDatasource,
    meta_type=UpdatePXFDatasourceMetadata,
)

print(operation_result)
UpdatePXFDatasourceRequest
clusterId
: string
updateMask
: google.protobuf.FieldMask
datasource
: PXFDatasource
PXFDatasource
name
: string
Data source name.
One of settings
s3
: PXFDatasourceS3
Settings of an external S3 data source.
jdbc
: PXFDatasourceJDBC
Settings of an external JDBC data source.
hdfs
: PXFDatasourceHDFS
Settings of an external HDFS data source.
hive
: PXFDatasourceHive
Settings of an external Hive data source.
PXFDatasourceS3
accessKey
: string
Public key to access S3 storage.
secretKey
: string
Secret key to access S3 storage.
fastUpload
: google.protobuf.BoolValue
Manages a fast upload of big files to S3 storage. In case of the false
value, the PXF generates files on disk before sending them to the S3 storage. In case of the true
value, the PXF generates files in RAM (the PXF writes to disc only if there is not enough RAM).
The fast upload is enabled by default.
endpoint
: string
S3 storage address. The default value is storage.yandexcloud.net
used for Yandex Object Storage.
PXFDatasourceJDBC
driver
: string
JDBC driver class in Java. The possible values are the following:
com.simba.athena.jdbc.Driver
com.clickhouse.jdbc.ClickHouseDriver
com.ibm.as400.access.AS400JDBCDriver
com.microsoft.sqlserver.jdbc.SQLServerDriver
com.mysql.cj.jdbc.Driver
org.postgresql.Driver
oracle.jdbc.driver.OracleDriver
net.snowflake.client.jdbc.SnowflakeDriver
io.trino.jdbc.TrinoDriver
url
: string
URL that the JDBC driver uses to connect to the database. Examples:
jdbc:mysql://mysqlhost:3306/testdb
: Local MySQL DB.
jdbc:postgresql://c-<cluster_id>.rw.mdb.yandexcloud.net:6432/db1
: Managed Service for PostgreSQL cluster. The address contains the special FQDN of the cluster's master.
jdbc:oracle:thin:@host.example:1521:orcl
: Oracle DB.
user
: string
Username of the DB owner.
password
: string
Password of the DB owner.
statementBatchSize
: google.protobuf.Int64Value
Number of rows to read in an external table, in a batch.
The default value is 100
.
statementFetchSize
: google.protobuf.Int64Value
Number of rows to fetch (buffer) when reading from an external table.
The default value is 1000
.
statementQueryTimeout
: google.protobuf.Int64Value
Amount of time (in seconds) the JDBC driver waits for a statement to run. This timeout applies to statements created for both read and write operations.
The default value is 60
.
poolEnabled
: google.protobuf.BoolValue
Determines whether JDBC connection pooling is used in a server configuration. By default, it is used.
poolMaximumSize
: google.protobuf.Int64Value
Maximum number of connections to the DB backend.
The default value is 5
.
poolConnectionTimeout
: google.protobuf.Int64Value
Maximum time, in milliseconds, to wait for a connection from the pool.
The default value is 30000
.
poolIdleTimeout
: google.protobuf.Int64Value
Maximum amount of time, in milliseconds, after which an inactive connection is considered idle.
The default value is 30000
.
poolMinimumIdle
: google.protobuf.Int64Value
Minimum number of idle connections maintained in the connection pool.
The default value is 0
.
PXFDatasourceHDFS
core
: PXFDatasourceCore
Settings of the file system and security rules.
kerberos
: PXFDatasourceKerberos
Settings of the Kerberos network authentication protocol.
userImpersonation
: google.protobuf.BoolValue
Enables authentication on behalf of the Greenplum® user when connecting to the remote file storage or DBMS.
The authentication is disabled by default.
username
: string
Login username for the remote file storage or DBMS if authentication on behalf of the Greenplum® user is enabled.
saslConnectionRetries
: google.protobuf.Int64Value
Maximum number of times that PXF retries a SASL connection request after a refused connection returns a GSS initiate failed
error.
The default value is 5
.
zkHosts
: string
ZooKeeper server hosts.
Specify values in the <address>:<port>
format.
dfs
: PXFDatasourceHDFSDfs
Settings of the distributed file system.
yarn
: PXFDatasourceHDFSYarn
Settings of the ResourceManager service that is responsible for tracking resources in a cluster and scheduling applications (e.g., MapReduce jobs).
PXFDatasourceHive
core
: PXFDatasourceCore
Settings of the file system and security rules.
kerberos
: PXFDatasourceKerberos
Settings of the Kerberos network authentication protocol.
userImpersonation
: google.protobuf.BoolValue
Enables authentication on behalf of the Greenplum® user when connecting to the remote file storage or DBMS.
The authentication is disabled by default.
username
: string
Login username for the remote file storage or DBMS if authentication on behalf of the Greenplum® user is enabled.
saslConnectionRetries
: google.protobuf.Int64Value
Maximum number of times that PXF retries a SASL connection request after a refused connection returns a GSS initiate failed
error.
The default value is 5
.
zkHosts
: string
ZooKeeper server hosts.
Specify values in the <address>:<port>
format.
ppd
: google.protobuf.BoolValue
Specifies if predicate pushdown is enabled for queries on external tables.
The predicate pushdown is enabled by default.
metastoreUris
: string
List of URIs separated by commas. To request metadata, the remote DBMS connects to Metastore by one of these URIs.
metastoreKerberosPrincipal
: string
Service principal for the Metastore Thrift server.
authKerberosPrincipal
: string
Kerberos server principal.
PXFDatasourceCore
defaultFs
: string
URI whose scheme and authority determine the file system implementation.
securityAuthToLocal
: string
Rules for mapping Kerberos principals to operating system user accounts.
PXFDatasourceKerberos
enable
: google.protobuf.BoolValue
Determines whether the Kerberos authentication server is used. By default, it is not used.
primary
: string
Host of the primary KDC server (Key Distribution Center).
realm
: string
Kerberos realm for a Greenplum® DB.
kdcServers
: string
KDC server hosts.
adminServer
: string
Administration server host. Usually, this is the primary Kerberos server.
defaultDomain
: string
Domain that is used for the host name extension. Applicable when Kerberos 4 service members become Kerberos 5 service members (for example, when rcmd.hostname is replaced with host/hostname.domain).
keytabBase64
: string
Base64 encoded contents of the keytab file.
PXFDatasourceHDFSDfs
haAutomaticFailoverEnabled
: google.protobuf.BoolValue
Determines whether automatic failover is enabled for the high availability of the file system.
The automatic failover is enabled by default.
blockAccessTokenEnabled
: google.protobuf.BoolValue
If true
, access tokens are used as capabilities for accessing datanodes. If false
, no access tokens are checked on accessing datanodes.
The check of access tokens is enabled by default.
useDatanodeHostname
: google.protobuf.BoolValue
Determines whether the datanode hostname is used when connecting to datanodes.
namenodes
: PXFDatasourceHDFSDfsNamenode
List of HDFS service logical names.
Specify them separated by commas. The names can be arbitrary.
nameservices
: string
Corresponds to the well-known HDFS client setting "dfs.nameservices" for this data source.
PXFDatasourceHDFSYarn
resourcemanagerHaEnabled
: google.protobuf.BoolValue
Determines whether high availability is enabled for YARN's ResourceManager services.
The high availability is enabled by default.
resourcemanagerHaAutoFailoverEnabled
: google.protobuf.BoolValue
Determines whether another ResourceManager should automatically become active when the active ResourceManager has failed and does not respond.
The switch of ResourceManagers is enabled by default if the high availability is enabled.
resourcemanagerHaAutoFailoverEmbedded
: google.protobuf.BoolValue
Determines whether the embedded ActiveStandbyElector method should be used for the election of the active ResourceManager. If the current active ResourceManager has failed and does not respond, the ActiveStandbyElector method makes another ResourceManager active which then takes over.
resourcemanagerClusterId
: string
Cluster ID. Specify it, so the ResourceManager service does not become active for a different cluster.
haRm
: PXFDatasourceHDFSYarnHaRm
Highly available ResourceManager service.
PXFDatasourceHDFSDfsNamenode
rpcAddress
: string
serviceRpcAddress
: string
httpAddress
: string
httpsAddress
: string
PXFDatasourceHDFSYarnHaRm
resourcemanagerAddress
: string
resourcemanagerSchedulerAddress
: string
resourcemanagerResourceTrackerAddress
: string
resourcemanagerAdminAddress
: string
resourcemanagerWebappAddress
: string
resourcemanagerWebappHttpsAddress
: string
Operation
An Operation resource. For more information, see Operation.
id
: string
ID of the operation.
description
: string
Description of the operation. 0-256 characters long.
createdAt
: google.protobuf.Timestamp
Creation timestamp.
createdBy
: string
ID of the user or service account who initiated the operation.
modifiedAt
: google.protobuf.Timestamp
The time when the Operation resource was last modified.
done
: bool
If the value is false
, it means the operation is still in progress.
If true
, the operation is completed, and either error
or response
is available.
metadata
: google.protobuf.Any
Service-specific metadata associated with the operation. It typically contains the ID of the target resource that the operation is performed on. Any method that returns a long-running operation should document the metadata type, if any.
One of result
The operation result.
If done == false
and there was no failure detected, neither error
nor response
is set.
If done == false
and there was a failure detected, error
is set.
If done == true
, exactly one of error
or response
is set.
error
: google.rpc.Status
The error result of the operation in case of failure or cancellation.
response
: google.protobuf.Any
The normal response of the operation in case of success.
If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is the standard Create/Update, the response should be the target resource of the operation. Any method that returns a long-running operation should document the response type, if any.