@pulumi/gcp

A Pulumi package for creating and managing Google Cloud Platform resources.

"use strict"; // *** WARNING: this file was generated by pulumi-language-nodejs. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** Object.defineProperty(exports, "__esModule", { value: true }); exports.Routine = void 0; const pulumi = require("@pulumi/pulumi"); const utilities = require("../utilities"); /** * A user-defined function or a stored procedure that belongs to a Dataset * * To get more information about Routine, see: * * * [API documentation](https://cloud.google.com/bigquery/docs/reference/rest/v2/routines) * * How-to Guides * * [Routines Intro](https://cloud.google.com/bigquery/docs/reference/rest/v2/routines) * * ## Example Usage * * ### Bigquery Routine Basic * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as gcp from "@pulumi/gcp"; * * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"}); * const sproc = new gcp.bigquery.Routine("sproc", { * datasetId: test.datasetId, * routineId: "routine_id", * routineType: "PROCEDURE", * language: "SQL", * securityMode: "INVOKER", * definitionBody: "CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);", * }); * ``` * ### Bigquery Routine Json * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as gcp from "@pulumi/gcp"; * * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"}); * const sproc = new gcp.bigquery.Routine("sproc", { * datasetId: test.datasetId, * routineId: "routine_id", * routineType: "SCALAR_FUNCTION", * language: "JAVASCRIPT", * definitionBody: "CREATE FUNCTION multiplyInputs return x*y;", * arguments: [ * { * name: "x", * dataType: "{\"typeKind\" : \"FLOAT64\"}", * }, * { * name: "y", * dataType: "{\"typeKind\" : \"FLOAT64\"}", * }, * ], * returnType: "{\"typeKind\" : \"FLOAT64\"}", * }); * ``` * ### Bigquery Routine Tvf * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as gcp from "@pulumi/gcp"; * * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"}); * const sproc = new gcp.bigquery.Routine("sproc", { * datasetId: test.datasetId, * routineId: "routine_id", * routineType: "TABLE_VALUED_FUNCTION", * language: "SQL", * definitionBody: "SELECT 1 + value AS value\n", * arguments: [{ * name: "value", * argumentKind: "FIXED_TYPE", * dataType: JSON.stringify({ * typeKind: "INT64", * }), * }], * returnTableType: JSON.stringify({ * columns: [{ * name: "value", * type: { * typeKind: "INT64", * }, * }], * }), * }); * ``` * ### Bigquery Routine Pyspark * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as gcp from "@pulumi/gcp"; * * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"}); * const testConnection = new gcp.bigquery.Connection("test", { * connectionId: "connection_id", * location: "US", * spark: {}, * }); * const pyspark = new gcp.bigquery.Routine("pyspark", { * datasetId: test.datasetId, * routineId: "routine_id", * routineType: "PROCEDURE", * language: "PYTHON", * definitionBody: `from pyspark.sql import SparkSession * * spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate() * * # Load data from BigQuery. * words = spark.read.format("bigquery") \\ * .option("table", "bigquery-public-data:samples.shakespeare") \\ * .load() * words.createOrReplaceTempView("words") * * # Perform word count. 
* word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count") * word_count.show() * word_count.printSchema() * * # Saving the data to BigQuery * word_count.write.format("bigquery") \\ * .option("writeMethod", "direct") \\ * .save("wordcount_dataset.wordcount_output") * `, * sparkOptions: { * connection: testConnection.name, * runtimeVersion: "2.1", * }, * }); * ``` * ### Bigquery Routine Pyspark Mainfile * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as gcp from "@pulumi/gcp"; * * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"}); * const testConnection = new gcp.bigquery.Connection("test", { * connectionId: "connection_id", * location: "US", * spark: {}, * }); * const pysparkMainfile = new gcp.bigquery.Routine("pyspark_mainfile", { * datasetId: test.datasetId, * routineId: "routine_id", * routineType: "PROCEDURE", * language: "PYTHON", * definitionBody: "", * sparkOptions: { * connection: testConnection.name, * runtimeVersion: "2.1", * mainFileUri: "gs://test-bucket/main.py", * pyFileUris: ["gs://test-bucket/lib.py"], * fileUris: ["gs://test-bucket/distribute_in_executor.json"], * archiveUris: ["gs://test-bucket/distribute_in_executor.tar.gz"], * }, * }); * ``` * ### Bigquery Routine Spark Jar * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as gcp from "@pulumi/gcp"; * * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"}); * const testConnection = new gcp.bigquery.Connection("test", { * connectionId: "connection_id", * location: "US", * spark: {}, * }); * const sparkJar = new gcp.bigquery.Routine("spark_jar", { * datasetId: test.datasetId, * routineId: "routine_id", * routineType: "PROCEDURE", * language: "SCALA", * definitionBody: "", * sparkOptions: { * connection: testConnection.name, * runtimeVersion: "2.1", * containerImage: "gcr.io/my-project-id/my-spark-image:latest", * mainClass: "com.google.test.jar.MainClass", * jarUris: ["gs://test-bucket/uberjar_spark_spark3.jar"], * properties: { * "spark.dataproc.scaling.version": "2", * "spark.reducer.fetchMigratedShuffle.enabled": "true", * }, * }, * }); * ``` * ### Bigquery Routine Data Governance Type * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as gcp from "@pulumi/gcp"; * * const test = new gcp.bigquery.Dataset("test", {datasetId: "tf_test_dataset_id_81126"}); * const customMaskingRoutine = new gcp.bigquery.Routine("custom_masking_routine", { * datasetId: test.datasetId, * routineId: "custom_masking_routine", * routineType: "SCALAR_FUNCTION", * language: "SQL", * dataGovernanceType: "DATA_MASKING", * definitionBody: "SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')", * arguments: [{ * name: "ssn", * dataType: "{\"typeKind\" : \"STRING\"}", * }], * returnType: "{\"typeKind\" : \"STRING\"}", * }); * ``` * ### Bigquery Routine Remote Function * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as gcp from "@pulumi/gcp"; * * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"}); * const testConnection = new gcp.bigquery.Connection("test", { * connectionId: "connection_id", * location: "US", * cloudResource: {}, * }); * const remoteFunction = new gcp.bigquery.Routine("remote_function", { * datasetId: test.datasetId, * routineId: "routine_id", * routineType: "SCALAR_FUNCTION", * definitionBody: "", * returnType: "{\"typeKind\" : \"STRING\"}", * remoteFunctionOptions: { * endpoint: 
"https://us-east1-my_gcf_project.cloudfunctions.net/remote_add", * connection: testConnection.name, * maxBatchingRows: "10", * userDefinedContext: { * z: "1.5", * }, * }, * }); * ``` * * ## Import * * Routine can be imported using any of these accepted formats: * * * `projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}` * * * `{{project}}/{{dataset_id}}/{{routine_id}}` * * * `{{dataset_id}}/{{routine_id}}` * * When using the `pulumi import` command, Routine can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:bigquery/routine:Routine default projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}} * ``` * * ```sh * $ pulumi import gcp:bigquery/routine:Routine default {{project}}/{{dataset_id}}/{{routine_id}} * ``` * * ```sh * $ pulumi import gcp:bigquery/routine:Routine default {{dataset_id}}/{{routine_id}} * ``` */ class Routine extends pulumi.CustomResource { /** * Get an existing Routine resource's state with the given name, ID, and optional extra * properties used to qualify the lookup. * * @param name The _unique_ name of the resulting resource. * @param id The _unique_ provider ID of the resource to lookup. * @param state Any extra arguments used during the lookup. * @param opts Optional settings to control the behavior of the CustomResource. */ static get(name, id, state, opts) { return new Routine(name, state, { ...opts, id: id }); } /** * Returns true if the given object is an instance of Routine. This is designed to work even * when multiple copies of the Pulumi SDK have been loaded into the same process. */ static isInstance(obj) { if (obj === undefined || obj === null) { return false; } return obj['__pulumiType'] === Routine.__pulumiType; } constructor(name, argsOrState, opts) { let resourceInputs = {}; opts = opts || {}; if (opts.id) { const state = argsOrState; resourceInputs["arguments"] = state?.arguments; resourceInputs["creationTime"] = state?.creationTime; resourceInputs["dataGovernanceType"] = state?.dataGovernanceType; resourceInputs["datasetId"] = state?.datasetId; resourceInputs["definitionBody"] = state?.definitionBody; resourceInputs["description"] = state?.description; resourceInputs["determinismLevel"] = state?.determinismLevel; resourceInputs["importedLibraries"] = state?.importedLibraries; resourceInputs["language"] = state?.language; resourceInputs["lastModifiedTime"] = state?.lastModifiedTime; resourceInputs["project"] = state?.project; resourceInputs["remoteFunctionOptions"] = state?.remoteFunctionOptions; resourceInputs["returnTableType"] = state?.returnTableType; resourceInputs["returnType"] = state?.returnType; resourceInputs["routineId"] = state?.routineId; resourceInputs["routineType"] = state?.routineType; resourceInputs["securityMode"] = state?.securityMode; resourceInputs["sparkOptions"] = state?.sparkOptions; } else { const args = argsOrState; if (args?.datasetId === undefined && !opts.urn) { throw new Error("Missing required property 'datasetId'"); } if (args?.definitionBody === undefined && !opts.urn) { throw new Error("Missing required property 'definitionBody'"); } if (args?.routineId === undefined && !opts.urn) { throw new Error("Missing required property 'routineId'"); } if (args?.routineType === undefined && !opts.urn) { throw new Error("Missing required property 'routineType'"); } resourceInputs["arguments"] = args?.arguments; resourceInputs["dataGovernanceType"] = args?.dataGovernanceType; resourceInputs["datasetId"] = args?.datasetId; resourceInputs["definitionBody"] = 
args?.definitionBody; resourceInputs["description"] = args?.description; resourceInputs["determinismLevel"] = args?.determinismLevel; resourceInputs["importedLibraries"] = args?.importedLibraries; resourceInputs["language"] = args?.language; resourceInputs["project"] = args?.project; resourceInputs["remoteFunctionOptions"] = args?.remoteFunctionOptions; resourceInputs["returnTableType"] = args?.returnTableType; resourceInputs["returnType"] = args?.returnType; resourceInputs["routineId"] = args?.routineId; resourceInputs["routineType"] = args?.routineType; resourceInputs["securityMode"] = args?.securityMode; resourceInputs["sparkOptions"] = args?.sparkOptions; resourceInputs["creationTime"] = undefined /*out*/; resourceInputs["lastModifiedTime"] = undefined /*out*/; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(Routine.__pulumiType, name, resourceInputs, opts); } } exports.Routine = Routine; /** @internal */ Routine.__pulumiType = 'gcp:bigquery/routine:Routine'; //# sourceMappingURL=routine.js.map
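
The examples in the file's docs cover creating and importing routines but never show the class's static helpers in action. Below is a minimal usage sketch of `Routine.get` and `Routine.isInstance`; the project, dataset, and routine names are hypothetical, and the lookup ID follows the first import format documented above.

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Look up the state of an already-provisioned routine by provider ID.
// "my-project", "my_dataset", and "my_routine" are placeholder names.
const existing = gcp.bigquery.Routine.get(
    "existing-routine",
    "projects/my-project/datasets/my_dataset/routines/my_routine",
);

// isInstance compares the __pulumiType marker instead of using `instanceof`,
// so it still works when multiple copies of the SDK are loaded in one process.
function routineIdOf(obj: unknown): pulumi.Output<string> | undefined {
    return gcp.bigquery.Routine.isInstance(obj) ? obj.routineId : undefined;
}

export const existingRoutineId = routineIdOf(existing);
```

Note that `get` adopts existing cloud state into the Pulumi program without managing it; to bring the resource under management, use `pulumi import` as shown in the docs above.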