@pulumi/gcp
A Pulumi package for creating and managing Google Cloud Platform resources.
import * as pulumi from "@pulumi/pulumi";
import * as inputs from "../types/input";
import * as outputs from "../types/output";
/**
* A user-defined function or a stored procedure that belongs to a Dataset
*
* To get more information about Routine, see:
*
* * [API documentation](https://cloud.google.com/bigquery/docs/reference/rest/v2/routines)
* * How-to Guides
* * [Routines Intro](https://cloud.google.com/bigquery/docs/reference/rest/v2/routines)
*
* ## Example Usage
*
* ### Bigquery Routine Basic
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
* const sproc = new gcp.bigquery.Routine("sproc", {
* datasetId: test.datasetId,
* routineId: "routine_id",
* routineType: "PROCEDURE",
* language: "SQL",
* securityMode: "INVOKER",
* definitionBody: "CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);",
* });
* ```
* ### Bigquery Routine Json
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
* const sproc = new gcp.bigquery.Routine("sproc", {
* datasetId: test.datasetId,
* routineId: "routine_id",
* routineType: "SCALAR_FUNCTION",
* language: "JAVASCRIPT",
* definitionBody: "CREATE FUNCTION multiplyInputs return x*y;",
* arguments: [
* {
* name: "x",
* dataType: "{\"typeKind\" : \"FLOAT64\"}",
* },
* {
* name: "y",
* dataType: "{\"typeKind\" : \"FLOAT64\"}",
* },
* ],
* returnType: "{\"typeKind\" : \"FLOAT64\"}",
* });
* ```
* ### Bigquery Routine Tvf
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
* const sproc = new gcp.bigquery.Routine("sproc", {
* datasetId: test.datasetId,
* routineId: "routine_id",
* routineType: "TABLE_VALUED_FUNCTION",
* language: "SQL",
* definitionBody: "SELECT 1 + value AS value\n",
* arguments: [{
* name: "value",
* argumentKind: "FIXED_TYPE",
* dataType: JSON.stringify({
* typeKind: "INT64",
* }),
* }],
* returnTableType: JSON.stringify({
* columns: [{
* name: "value",
* type: {
* typeKind: "INT64",
* },
* }],
* }),
* });
* ```
* ### Bigquery Routine Pyspark
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
* const testConnection = new gcp.bigquery.Connection("test", {
* connectionId: "connection_id",
* location: "US",
* spark: {},
* });
* const pyspark = new gcp.bigquery.Routine("pyspark", {
* datasetId: test.datasetId,
* routineId: "routine_id",
* routineType: "PROCEDURE",
* language: "PYTHON",
* definitionBody: `from pyspark.sql import SparkSession
*
* spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
*
* # Load data from BigQuery.
* words = spark.read.format("bigquery") \\
* .option("table", "bigquery-public-data:samples.shakespeare") \\
* .load()
* words.createOrReplaceTempView("words")
*
* # Perform word count.
* word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
* word_count.show()
* word_count.printSchema()
*
* # Saving the data to BigQuery
* word_count.write.format("bigquery") \\
* .option("writeMethod", "direct") \\
* .save("wordcount_dataset.wordcount_output")
* `,
* sparkOptions: {
* connection: testConnection.name,
* runtimeVersion: "2.1",
* },
* });
* ```
* ### Bigquery Routine Pyspark Mainfile
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
* const testConnection = new gcp.bigquery.Connection("test", {
* connectionId: "connection_id",
* location: "US",
* spark: {},
* });
* const pysparkMainfile = new gcp.bigquery.Routine("pyspark_mainfile", {
* datasetId: test.datasetId,
* routineId: "routine_id",
* routineType: "PROCEDURE",
* language: "PYTHON",
* definitionBody: "",
* sparkOptions: {
* connection: testConnection.name,
* runtimeVersion: "2.1",
* mainFileUri: "gs://test-bucket/main.py",
* pyFileUris: ["gs://test-bucket/lib.py"],
* fileUris: ["gs://test-bucket/distribute_in_executor.json"],
* archiveUris: ["gs://test-bucket/distribute_in_executor.tar.gz"],
* },
* });
* ```
* ### Bigquery Routine Spark Jar
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
* const testConnection = new gcp.bigquery.Connection("test", {
* connectionId: "connection_id",
* location: "US",
* spark: {},
* });
* const sparkJar = new gcp.bigquery.Routine("spark_jar", {
* datasetId: test.datasetId,
* routineId: "routine_id",
* routineType: "PROCEDURE",
* language: "SCALA",
* definitionBody: "",
* sparkOptions: {
* connection: testConnection.name,
* runtimeVersion: "2.1",
* containerImage: "gcr.io/my-project-id/my-spark-image:latest",
* mainClass: "com.google.test.jar.MainClass",
* jarUris: ["gs://test-bucket/uberjar_spark_spark3.jar"],
* properties: {
* "spark.dataproc.scaling.version": "2",
* "spark.reducer.fetchMigratedShuffle.enabled": "true",
* },
* },
* });
* ```
* ### Bigquery Routine Data Governance Type
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const test = new gcp.bigquery.Dataset("test", {datasetId: "tf_test_dataset_id_81126"});
* const customMaskingRoutine = new gcp.bigquery.Routine("custom_masking_routine", {
* datasetId: test.datasetId,
* routineId: "custom_masking_routine",
* routineType: "SCALAR_FUNCTION",
* language: "SQL",
* dataGovernanceType: "DATA_MASKING",
* definitionBody: "SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')",
* arguments: [{
* name: "ssn",
* dataType: "{\"typeKind\" : \"STRING\"}",
* }],
* returnType: "{\"typeKind\" : \"STRING\"}",
* });
* ```
* ### Bigquery Routine Remote Function
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
*
* const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
* const testConnection = new gcp.bigquery.Connection("test", {
* connectionId: "connection_id",
* location: "US",
* cloudResource: {},
* });
* const remoteFunction = new gcp.bigquery.Routine("remote_function", {
* datasetId: test.datasetId,
* routineId: "routine_id",
* routineType: "SCALAR_FUNCTION",
* definitionBody: "",
* returnType: "{\"typeKind\" : \"STRING\"}",
* remoteFunctionOptions: {
* endpoint: "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add",
* connection: testConnection.name,
* maxBatchingRows: "10",
* userDefinedContext: {
* z: "1.5",
* },
* },
* });
* ```
*
* ## Import
*
* Routine can be imported using any of these accepted formats:
*
* * `projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}`
*
* * `{{project}}/{{dataset_id}}/{{routine_id}}`
*
* * `{{dataset_id}}/{{routine_id}}`
*
* When using the `pulumi import` command, Routine can be imported using one of the formats above. For example:
*
* ```sh
* $ pulumi import gcp:bigquery/routine:Routine default projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}
* ```
*
* ```sh
* $ pulumi import gcp:bigquery/routine:Routine default {{project}}/{{dataset_id}}/{{routine_id}}
* ```
*
* ```sh
* $ pulumi import gcp:bigquery/routine:Routine default {{dataset_id}}/{{routine_id}}
* ```
*/
export declare class Routine extends pulumi.CustomResource {
/**
* Get an existing Routine resource's state with the given name, ID, and optional extra
* properties used to qualify the lookup.
*
* @param name The _unique_ name of the resulting resource.
* @param id The _unique_ provider ID of the resource to lookup.
* @param state Any extra arguments used during the lookup.
* @param opts Optional settings to control the behavior of the CustomResource.
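 *
 * For example, a minimal sketch (the project, dataset, and routine IDs below are
 * hypothetical; the ID string uses the same formats accepted by `pulumi import`):
 *
 * ```typescript
 * import * as gcp from "@pulumi/gcp";
 *
 * const existing = gcp.bigquery.Routine.get("existing-routine",
 *     "projects/my-project/datasets/my_dataset/routines/my_routine");
 * ```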
*/
static get(name: string, id: pulumi.Input<pulumi.ID>, state?: RoutineState, opts?: pulumi.CustomResourceOptions): Routine;
/**
* Returns true if the given object is an instance of Routine. This is designed to work even
* when multiple copies of the Pulumi SDK have been loaded into the same process.
*/
static isInstance(obj: any): obj is Routine;
/**
* Input/output argument of a function or a stored procedure.
* Structure is documented below.
*/
readonly arguments: pulumi.Output<outputs.bigquery.RoutineArgument[] | undefined>;
/**
* The time when this routine was created, in milliseconds since the
* epoch.
*/
readonly creationTime: pulumi.Output<number>;
/**
* If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
* Possible values are: `DATA_MASKING`.
*/
readonly dataGovernanceType: pulumi.Output<string | undefined>;
/**
* The ID of the dataset containing this routine
*/
readonly datasetId: pulumi.Output<string>;
/**
* The body of the routine. For functions, this is the expression in the AS clause.
* If language=SQL, it is the substring inside (but excluding) the parentheses.
*/
readonly definitionBody: pulumi.Output<string>;
/**
* The description of the routine if defined.
*/
readonly description: pulumi.Output<string | undefined>;
/**
* The determinism level of the JavaScript UDF if defined.
* Possible values are: `DETERMINISM_LEVEL_UNSPECIFIED`, `DETERMINISTIC`, `NOT_DETERMINISTIC`.
*/
readonly determinismLevel: pulumi.Output<string | undefined>;
/**
* Optional. If language = "JAVASCRIPT", this field stores the path of the
* imported JAVASCRIPT libraries.
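 *
 * A minimal sketch (the Cloud Storage bucket and object names are hypothetical): a
 * JavaScript UDF whose body calls a helper defined in an imported library:
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 *
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
 * const jsUdf = new gcp.bigquery.Routine("js_udf", {
 *     datasetId: test.datasetId,
 *     routineId: "multiply_inputs",
 *     routineType: "SCALAR_FUNCTION",
 *     language: "JAVASCRIPT",
 *     definitionBody: "return multiplyInputs(x, y);",
 *     arguments: [
 *         {name: "x", dataType: JSON.stringify({typeKind: "FLOAT64"})},
 *         {name: "y", dataType: JSON.stringify({typeKind: "FLOAT64"})},
 *     ],
 *     returnType: JSON.stringify({typeKind: "FLOAT64"}),
 *     importedLibraries: ["gs://my-bucket/multiply_inputs.js"],
 * });
 * ```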
*/
readonly importedLibraries: pulumi.Output<string[] | undefined>;
/**
* The language of the routine.
* Possible values are: `SQL`, `JAVASCRIPT`, `PYTHON`, `JAVA`, `SCALA`.
*/
readonly language: pulumi.Output<string | undefined>;
/**
* The time when this routine was modified, in milliseconds since the
* epoch.
*/
readonly lastModifiedTime: pulumi.Output<number>;
/**
* The ID of the project in which the resource belongs.
* If it is not provided, the provider project is used.
*/
readonly project: pulumi.Output<string>;
/**
* Remote function specific options.
* Structure is documented below.
*/
readonly remoteFunctionOptions: pulumi.Output<outputs.bigquery.RoutineRemoteFunctionOptions | undefined>;
/**
* Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION".
* If absent, the return table type is inferred from definitionBody at query time in each query
* that references this routine. If present, then the columns in the evaluated table result will
 * be cast to match the column types specified in the return table type, at query time.
*/
readonly returnTableType: pulumi.Output<string | undefined>;
/**
* A JSON schema for the return type. Optional if language = "SQL"; required otherwise.
* If absent, the return type is inferred from definitionBody at query time in each query
* that references this routine. If present, then the evaluated result will be cast to
* the specified returned type at query time. ~>**NOTE**: Because this field expects a JSON
* string, any changes to the string will create a diff, even if the JSON itself hasn't
 * changed. If the API returns a different value for the same schema, e.g. it switched
 * the order of values or replaced STRUCT field type with RECORD field type, we currently
* cannot suppress the recurring diff this causes. As a workaround, we recommend using
* the schema as returned by the API.
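 *
 * For instance, a sketch mirroring the TVF example above: building the string with
 * `JSON.stringify` keeps what Pulumi sends deterministic, though the API may still
 * normalize the schema differently (the `typeKind` value here is illustrative).
 *
 * ```typescript
 * // Stringify a fixed object rather than hand-writing the JSON.
 * const returnType = JSON.stringify({typeKind: "FLOAT64"});
 * // ...then pass `returnType` in the gcp.bigquery.Routine arguments.
 * ```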
*/
readonly returnType: pulumi.Output<string | undefined>;
/**
 * The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
*/
readonly routineId: pulumi.Output<string>;
/**
* The type of routine.
* Possible values are: `SCALAR_FUNCTION`, `PROCEDURE`, `TABLE_VALUED_FUNCTION`.
*/
readonly routineType: pulumi.Output<string>;
/**
* Optional. The security mode of the routine, if defined. If not defined, the security mode is automatically determined from the routine's configuration.
* Possible values are: `DEFINER`, `INVOKER`.
*/
readonly securityMode: pulumi.Output<string | undefined>;
/**
 * Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for the Spark stored procedure.
* Structure is documented below.
*/
readonly sparkOptions: pulumi.Output<outputs.bigquery.RoutineSparkOptions | undefined>;
/**
* Create a Routine resource with the given unique name, arguments, and options.
*
* @param name The _unique_ name of the resource.
* @param args The arguments to use to populate this resource's properties.
* @param opts A bag of options that control this resource's behavior.
*/
constructor(name: string, args: RoutineArgs, opts?: pulumi.CustomResourceOptions);
}
/**
* Input properties used for looking up and filtering Routine resources.
*/
export interface RoutineState {
/**
* Input/output argument of a function or a stored procedure.
* Structure is documented below.
*/
arguments?: pulumi.Input<pulumi.Input<inputs.bigquery.RoutineArgument>[]>;
/**
* The time when this routine was created, in milliseconds since the
* epoch.
*/
creationTime?: pulumi.Input<number>;
/**
* If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
* Possible values are: `DATA_MASKING`.
*/
dataGovernanceType?: pulumi.Input<string>;
/**
* The ID of the dataset containing this routine
*/
datasetId?: pulumi.Input<string>;
/**
* The body of the routine. For functions, this is the expression in the AS clause.
* If language=SQL, it is the substring inside (but excluding) the parentheses.
*/
definitionBody?: pulumi.Input<string>;
/**
* The description of the routine if defined.
*/
description?: pulumi.Input<string>;
/**
* The determinism level of the JavaScript UDF if defined.
* Possible values are: `DETERMINISM_LEVEL_UNSPECIFIED`, `DETERMINISTIC`, `NOT_DETERMINISTIC`.
*/
determinismLevel?: pulumi.Input<string>;
/**
* Optional. If language = "JAVASCRIPT", this field stores the path of the
* imported JAVASCRIPT libraries.
*/
importedLibraries?: pulumi.Input<pulumi.Input<string>[]>;
/**
* The language of the routine.
* Possible values are: `SQL`, `JAVASCRIPT`, `PYTHON`, `JAVA`, `SCALA`.
*/
language?: pulumi.Input<string>;
/**
* The time when this routine was modified, in milliseconds since the
* epoch.
*/
lastModifiedTime?: pulumi.Input<number>;
/**
* The ID of the project in which the resource belongs.
* If it is not provided, the provider project is used.
*/
project?: pulumi.Input<string>;
/**
* Remote function specific options.
* Structure is documented below.
*/
remoteFunctionOptions?: pulumi.Input<inputs.bigquery.RoutineRemoteFunctionOptions>;
/**
* Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION".
* If absent, the return table type is inferred from definitionBody at query time in each query
* that references this routine. If present, then the columns in the evaluated table result will
 * be cast to match the column types specified in the return table type, at query time.
*/
returnTableType?: pulumi.Input<string>;
/**
* A JSON schema for the return type. Optional if language = "SQL"; required otherwise.
* If absent, the return type is inferred from definitionBody at query time in each query
* that references this routine. If present, then the evaluated result will be cast to
* the specified returned type at query time. ~>**NOTE**: Because this field expects a JSON
* string, any changes to the string will create a diff, even if the JSON itself hasn't
 * changed. If the API returns a different value for the same schema, e.g. it switched
 * the order of values or replaced STRUCT field type with RECORD field type, we currently
* cannot suppress the recurring diff this causes. As a workaround, we recommend using
* the schema as returned by the API.
*/
returnType?: pulumi.Input<string>;
/**
 * The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
*/
routineId?: pulumi.Input<string>;
/**
* The type of routine.
* Possible values are: `SCALAR_FUNCTION`, `PROCEDURE`, `TABLE_VALUED_FUNCTION`.
*/
routineType?: pulumi.Input<string>;
/**
* Optional. The security mode of the routine, if defined. If not defined, the security mode is automatically determined from the routine's configuration.
* Possible values are: `DEFINER`, `INVOKER`.
*/
securityMode?: pulumi.Input<string>;
/**
 * Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for the Spark stored procedure.
* Structure is documented below.
*/
sparkOptions?: pulumi.Input<inputs.bigquery.RoutineSparkOptions>;
}
/**
* The set of arguments for constructing a Routine resource.
*/
export interface RoutineArgs {
/**
* Input/output argument of a function or a stored procedure.
* Structure is documented below.
*/
arguments?: pulumi.Input<pulumi.Input<inputs.bigquery.RoutineArgument>[]>;
/**
* If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
* Possible values are: `DATA_MASKING`.
*/
dataGovernanceType?: pulumi.Input<string>;
/**
* The ID of the dataset containing this routine
*/
datasetId: pulumi.Input<string>;
/**
* The body of the routine. For functions, this is the expression in the AS clause.
* If language=SQL, it is the substring inside (but excluding) the parentheses.
*/
definitionBody: pulumi.Input<string>;
/**
* The description of the routine if defined.
*/
description?: pulumi.Input<string>;
/**
* The determinism level of the JavaScript UDF if defined.
* Possible values are: `DETERMINISM_LEVEL_UNSPECIFIED`, `DETERMINISTIC`, `NOT_DETERMINISTIC`.
*/
determinismLevel?: pulumi.Input<string>;
/**
* Optional. If language = "JAVASCRIPT", this field stores the path of the
* imported JAVASCRIPT libraries.
*/
importedLibraries?: pulumi.Input<pulumi.Input<string>[]>;
/**
* The language of the routine.
* Possible values are: `SQL`, `JAVASCRIPT`, `PYTHON`, `JAVA`, `SCALA`.
*/
language?: pulumi.Input<string>;
/**
* The ID of the project in which the resource belongs.
* If it is not provided, the provider project is used.
*/
project?: pulumi.Input<string>;
/**
* Remote function specific options.
* Structure is documented below.
*/
remoteFunctionOptions?: pulumi.Input<inputs.bigquery.RoutineRemoteFunctionOptions>;
/**
* Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION".
* If absent, the return table type is inferred from definitionBody at query time in each query
* that references this routine. If present, then the columns in the evaluated table result will
 * be cast to match the column types specified in the return table type, at query time.
*/
returnTableType?: pulumi.Input<string>;
/**
* A JSON schema for the return type. Optional if language = "SQL"; required otherwise.
* If absent, the return type is inferred from definitionBody at query time in each query
* that references this routine. If present, then the evaluated result will be cast to
* the specified returned type at query time. ~>**NOTE**: Because this field expects a JSON
* string, any changes to the string will create a diff, even if the JSON itself hasn't
 * changed. If the API returns a different value for the same schema, e.g. it switched
 * the order of values or replaced STRUCT field type with RECORD field type, we currently
* cannot suppress the recurring diff this causes. As a workaround, we recommend using
* the schema as returned by the API.
*/
returnType?: pulumi.Input<string>;
/**
 * The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
*/
routineId: pulumi.Input<string>;
/**
* The type of routine.
* Possible values are: `SCALAR_FUNCTION`, `PROCEDURE`, `TABLE_VALUED_FUNCTION`.
*/
routineType: pulumi.Input<string>;
/**
* Optional. The security mode of the routine, if defined. If not defined, the security mode is automatically determined from the routine's configuration.
* Possible values are: `DEFINER`, `INVOKER`.
*/
securityMode?: pulumi.Input<string>;
/**
 * Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for the Spark stored procedure.
* Structure is documented below.
*/
sparkOptions?: pulumi.Input<inputs.bigquery.RoutineSparkOptions>;
}