@pulumi/fastly
A Pulumi package for creating and managing Fastly cloud resources. Based on terraform-provider-fastly v4.
TypeScript
import * as outputs from "../types/output";
export interface AlertDimensions {
/**
* Names of a subset of domains that the alert monitors.
*/
domains?: string[];
/**
* Addresses of a subset of backends that the alert monitors.
*/
origins?: string[];
}
export interface AlertEvaluationStrategy {
/**
* Threshold for the denominator value used in evaluations that calculate a rate or ratio. Usually used to filter out noise.
*/
ignoreBelow?: number;
/**
* The length of time to evaluate whether the conditions have been met. The data is polled every minute. One of: `2m`, `3m`, `5m`, `15m`, `30m`.
*/
period: string;
/**
* Threshold used to alert.
*/
threshold: number;
/**
* Type of strategy to use to evaluate. One of: `aboveThreshold`, `allAboveThreshold`, `belowThreshold`, `percentAbsolute`, `percentDecrease`, `percentIncrease`.
*/
type: string;
}
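// A minimal sketch of how the two shapes above compose in an alert resource.
// The `fastly.Alert` resource and its `source`, `metric`, and `serviceId`
// arguments are assumptions based on the Fastly Alerts API; only `dimensions`
// and `evaluationStrategy` are confirmed by the interfaces in this file.
// Subsequent sketches assume the same import.
import * as fastly from "@pulumi/fastly";

const errorRateAlert = new fastly.Alert("error-rate", {
    name: "Origin 5xx ratio",
    source: "origins",       // assumed: the stats source being monitored
    metric: "status_5xx",    // assumed: metric name from that source
    serviceId: "SERVICE_ID", // placeholder
    dimensions: {
        origins: ["backend-a"], // monitor a subset of backends
    },
    evaluationStrategy: {
        type: "percentIncrease", // one of the strategy types listed above
        period: "15m",           // evaluation window; data is polled every minute
        threshold: 0.05,
        ignoreBelow: 10,         // filter out noise from small denominators
    },
});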
export interface CustomDashboardDashboardItem {
/**
* An object which describes the data to display.
*/
dataSource: outputs.CustomDashboardDashboardItemDataSource;
/**
* Dashboard item identifier (alphanumeric). Must be unique, relative to other items in the same dashboard.
*/
id: string;
/**
* The number of columns for the dashboard item to span. Dashboards are rendered on a 12-column grid on "desktop" screen sizes.
*/
span?: number;
/**
* A human-readable subtitle for the dashboard item. Often a description of the visualization.
*/
subtitle: string;
/**
* A human-readable title for the dashboard item.
*/
title: string;
/**
* An object which describes the data visualization to display.
*/
visualization: outputs.CustomDashboardDashboardItemVisualization;
}
export interface CustomDashboardDashboardItemDataSource {
/**
* Configuration options for the selected data source.
*/
config: outputs.CustomDashboardDashboardItemDataSourceConfig;
/**
* The source of the data to display. One of: `stats.edge`, `stats.domain`, `stats.origin`.
*/
type: string;
}
export interface CustomDashboardDashboardItemDataSourceConfig {
/**
* The metrics to visualize. Valid options are defined by the selected data source: [stats.edge](https://www.fastly.com/documentation/reference/api/observability/custom-dashboards/metrics/edge/), [stats.domain](https://www.fastly.com/documentation/reference/api/observability/custom-dashboards/metrics/domain/), [stats.origin](https://www.fastly.com/documentation/reference/api/observability/custom-dashboards/metrics/origin/).
*/
metrics: string[];
}
export interface CustomDashboardDashboardItemVisualization {
/**
* Configuration options for the selected data source.
*/
config: outputs.CustomDashboardDashboardItemVisualizationConfig;
/**
* The type of visualization to display. One of: `chart`.
*/
type: string;
}
export interface CustomDashboardDashboardItemVisualizationConfig {
/**
* The aggregation function to apply to the dataset. One of: `avg`, `sum`, `min`, `max`, `latest`, `p95`.
*/
calculationMethod?: string;
/**
* The units to use to format the data. One of: `number`, `bytes`, `percent`, `requests`, `responses`, `seconds`, `milliseconds`, `ratio`, `bitrate`.
*/
format?: string;
/**
* The type of chart to display. One of: `line`, `bar`, `single-metric`, `donut`.
*/
plotType: string;
}
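// A sketch of a single dashboard item showing how the four interfaces above
// nest. The `fastly.CustomDashboard` resource and its `name` and
// `dashboardItems` arguments are assumptions inferred from the interface
// names; the item `id` is documented above as an identifier and is likely
// server-assigned, so it is omitted here.
const dashboard = new fastly.CustomDashboard("edge-overview", {
    name: "Edge overview",
    dashboardItems: [{
        title: "Requests",
        subtitle: "Edge requests, averaged",
        span: 6, // half of the 12-column desktop grid
        dataSource: {
            type: "stats.edge",
            config: { metrics: ["requests"] }, // valid metrics depend on the data source
        },
        visualization: {
            type: "chart",
            config: { plotType: "line", calculationMethod: "avg", format: "requests" },
        },
    }],
});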
export interface GetConfigstoresStore {
/**
* Alphanumeric string identifying the Config Store.
*/
id: string;
/**
* Name for the Config Store.
*/
name: string;
}
export interface GetDatacentersPop {
/**
* A code representing the POP location.
*/
code: string;
/**
* A code representing the general region of the world in which the POP location resides.
*/
group: string;
/**
* The name of the POP.
*/
name: string;
/**
* A code representing the shielding name of the POP. The value may be empty if the POP is not available for shielding.
*/
shield: string;
}
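// A sketch of listing POPs that can act as shields (useful for the `shield`
// backend attribute further below). The `fastly.getDatacenters()` function
// and its `pops` result property are assumptions inferred from the
// interface name.
export const shieldCodes = fastly.getDatacenters().then(dcs =>
    dcs.pops
        .filter(pop => pop.shield !== "") // empty when a POP cannot shield
        .map(pop => `${pop.name}: ${pop.shield}`));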
export interface GetDictionariesDictionary {
/**
* Alphanumeric string identifying the Dictionary.
*/
id: string;
/**
* Name for the Dictionary.
*/
name: string;
/**
* Indicates if items in the dictionary are readable or not.
*/
writeOnly: boolean;
}
export interface GetKvstoresStore {
/**
* Alphanumeric string identifying the KV Store.
*/
id: string;
/**
* Name for the KV Store.
*/
name: string;
}
export interface GetSecretstoresStore {
/**
* Alphanumeric string identifying the Secrets Store.
*/
id: string;
/**
* Name for the Secrets Store.
*/
name: string;
}
export interface GetServicesDetail {
/**
* A freeform descriptive note.
*/
comment: string;
/**
* Date and time in ISO 8601 format.
*/
createdAt: string;
/**
* Alphanumeric string identifying the customer.
*/
customerId: string;
/**
* Alphanumeric string identifying the service.
*/
id: string;
/**
* The name of the service.
*/
name: string;
/**
* The type of this service. One of `vcl`, `wasm`.
*/
type: string;
/**
* Date and time in ISO 8601 format.
*/
updatedAt: string;
/**
* The currently activated version.
*/
version: number;
}
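// A sketch of filtering the services list by type; `fastly.getServices()`
// and its `details` result property are assumptions inferred from the
// interface name.
export const computeServiceIds = fastly.getServices().then(svcs =>
    svcs.details
        .filter(s => s.type === "wasm") // Compute services report type `wasm`
        .map(s => s.id));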
export interface GetTlsConfigurationDnsRecord {
/**
* Type of DNS record to set, e.g. A, AAAA, or CNAME.
*/
recordType: string;
/**
* The IP address or hostname of the DNS record.
*/
recordValue: string;
/**
* The regions that will be used to route traffic. Select DNS Records with a `global` region to route traffic to the most performant point of presence (POP) worldwide (global pricing will apply). Select DNS records with a `us-eu` region to exclusively land traffic on North American and European POPs.
*/
region: string;
}
export interface GetVclSnippetsVclSnippet {
/**
* The VCL code that specifies exactly what the snippet does.
*/
content: string;
/**
* Alphanumeric string identifying a VCL Snippet.
*/
id: string;
/**
* The name for the snippet.
*/
name: string;
/**
* Priority determines execution order. Lower numbers execute first.
*/
priority: number;
/**
* The location in generated VCL where the snippet should be placed.
*/
type: string;
}
export interface ServiceACLEntriesEntry {
/**
* A personal freeform descriptive note
*/
comment?: string;
/**
* The unique ID of the entry
*/
id: string;
/**
* An IP address that is the focus for the ACL
*/
ip: string;
/**
* A boolean that will negate the match if true
*/
negated?: boolean;
/**
* An optional subnet mask applied to the IP address
*/
subnet?: string;
}
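// A sketch of managing ACL entries with this shape. The
// `fastly.ServiceACLEntries` resource name follows the interface name above,
// while the `serviceId`, `aclId`, and `entries` argument names are
// assumptions; the `id` field is server-assigned, so it is omitted.
const officeAcl = new fastly.ServiceACLEntries("office-ips", {
    serviceId: "SERVICE_ID", // placeholder
    aclId: "ACL_ID",         // placeholder
    entries: [{
        ip: "203.0.113.0",
        subnet: "24",             // optional mask applied to the IP
        negated: false,           // set true to turn this into an exclusion
        comment: "Office network",
    }],
});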
export interface ServiceComputeBackend {
/**
* An IPv4, hostname, or IPv6 address for the Backend
*/
address: string;
/**
* How long to wait between bytes in milliseconds. Default `10000`
*/
betweenBytesTimeout?: number;
/**
* How long to wait for a connection to be established, in milliseconds. Default `1000`
*/
connectTimeout?: number;
/**
* Number of errors to allow before the Backend is marked as down. Default `0`
*/
errorThreshold?: number;
/**
* How long to wait for the first bytes in milliseconds. Default `15000`
*/
firstByteTimeout?: number;
/**
* Name of a defined `healthcheck` to assign to this backend
*/
healthcheck?: string;
/**
* How long in seconds to keep a persistent connection to the backend between requests.
*/
keepaliveTime?: number;
/**
* Maximum number of connections for this Backend. Default `200`
*/
maxConn?: number;
/**
* Maximum allowed TLS version on SSL connections to this backend.
*/
maxTlsVersion?: string;
/**
* Minimum allowed TLS version on SSL connections to this backend.
*/
minTlsVersion?: string;
/**
* Name for this Backend. Must be unique to this Service. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* The hostname to override the Host header
*/
overrideHost?: string;
/**
* The port number on which the Backend responds. Default `80`
*/
port?: number;
/**
* Prefer IPv6 connections to origins for hostname backends. Default `true`
*/
preferIpv6?: boolean;
/**
* Value that, when shared across backends, enables those backends to share the same health check.
*/
shareKey?: string;
/**
* The POP of the shield designated to reduce inbound load. Valid values for `shield` are included in the `GET /datacenters` API response
*/
shield?: string;
/**
* CA certificate attached to origin.
*/
sslCaCert?: string;
/**
* Configure certificate validation. Does not affect SNI at all
*/
sslCertHostname?: string;
/**
* Be strict about checking SSL certs. Default `true`
*/
sslCheckCert?: boolean;
/**
* Cipher list consisting of one or more cipher strings separated by colons. Commas or spaces are also acceptable separators but colons are normally used.
*/
sslCiphers?: string;
/**
* Client certificate attached to origin. Used when connecting to the backend
*/
sslClientCert?: string;
/**
* Client key attached to origin. Used when connecting to the backend
*/
sslClientKey?: string;
/**
* Configure SNI in the TLS handshake. Does not affect cert validation at all
*/
sslSniHostname?: string;
/**
* Whether or not to use SSL to reach the Backend. Default `false`
*/
useSsl?: boolean;
/**
* The [portion of traffic](https://docs.fastly.com/en/guides/load-balancing-configuration#how-weight-affects-load-balancing) to send to this Backend. Each Backend receives weight / total of the traffic. Default `100`
*/
weight?: number;
}
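// A sketch of one TLS origin on a Compute service. `fastly.ServiceCompute`
// with `domains`, `backends`, and `package` arguments is assumed from the
// interface names in this file; the package filename is a placeholder.
const service = new fastly.ServiceCompute("example", {
    name: "example-compute",
    domains: [{ name: "example.global.ssl.fastly.net" }],
    backends: [{
        name: "origin0",                       // renaming recreates the backend
        address: "origin.example.com",
        port: 443,
        useSsl: true,
        sslCertHostname: "origin.example.com", // certificate validation only
        sslSniHostname: "origin.example.com",  // SNI only
        overrideHost: "origin.example.com",
        connectTimeout: 2000,                  // milliseconds
    }],
    package: { filename: "package.tar.gz" },   // placeholder compute package
});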
export interface ServiceComputeDictionary {
/**
* The ID of the dictionary
*/
dictionaryId: string;
/**
* Allow the dictionary to be deleted, even if it contains entries. Defaults to false.
*/
forceDestroy?: boolean;
/**
* A unique name to identify this dictionary. It is important to note that changing this attribute will delete and recreate the dictionary, and discard the current items in the dictionary
*/
name: string;
/**
 * Indicates if items in the dictionary are readable or not.
 */
writeOnly?: boolean;
}
export interface ServiceComputeDomain {
/**
* An optional comment about the Domain.
*/
comment?: string;
/**
* The domain that this Service will respond to. It is important to note that changing this attribute will delete and recreate the resource.
*/
name: string;
}
export interface ServiceComputeImageOptimizerDefaultSettings {
/**
* Enables GIF to MP4 transformations on this service.
*/
allowVideo?: boolean;
/**
* The default quality to use with JPEG output. This can be overridden with the "quality" parameter on specific image optimizer requests.
*/
jpegQuality?: number;
/**
* The default type of JPEG output to use. This can be overridden with "format=bjpeg" and "format=pjpeg" on specific image optimizer requests. Valid values are `auto`, `baseline` and `progressive`.
* - auto: Match the input JPEG type, or baseline if transforming from a non-JPEG input.
* - baseline: Output baseline JPEG images
* - progressive: Output progressive JPEG images
*/
jpegType?: string;
/**
* Used by the provider to identify modified settings. Changing this value will force the entire block to be deleted, then recreated.
*/
name?: string;
/**
* The type of filter to use while resizing an image. Valid values are `lanczos3`, `lanczos2`, `bicubic`, `bilinear` and `nearest`.
* - lanczos3: A Lanczos filter with a kernel size of 3. Lanczos filters can detect edges and linear features within an image, providing the best possible reconstruction.
* - lanczos2: A Lanczos filter with a kernel size of 2.
* - bicubic: A filter using an average of a 4x4 environment of pixels, weighing the innermost pixels higher.
* - bilinear: A filter using an average of a 2x2 environment of pixels.
* - nearest: A filter using the value of nearby translated pixel values. Preserves hard edges.
*/
resizeFilter?: string;
/**
* Whether to allow output images to render at sizes larger than the input.
*/
upscale?: boolean;
/**
* Controls whether or not to default to WebP output when the client supports it. This is equivalent to adding "auto=webp" to all image optimizer requests.
*/
webp?: boolean;
/**
* The default quality to use with WebP output. This can be overridden with the second option in the "quality" URL parameter on specific image optimizer requests.
*/
webpQuality?: number;
}
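// A sketch of the settings block above. Passing it via an
// `imageOptimizerDefaultSettings` property on the service is an assumption
// inferred from the interface name, and Image Optimizer must be enabled on
// the service for it to take effect.
const ioDefaults = {
    jpegType: "progressive",  // progressive output regardless of input type
    jpegQuality: 75,
    resizeFilter: "lanczos3", // best reconstruction of edges and linear features
    webp: true,               // equivalent to auto=webp on every request
    webpQuality: 80,
    upscale: false,           // never render larger than the input
};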
export interface ServiceComputeLoggingBigquery {
/**
* The google account name used to obtain temporary credentials (default none). You may optionally provide this via an environment variable, `FASTLY_GCS_ACCOUNT_NAME`.
*/
accountName?: string;
/**
* The ID of your BigQuery dataset
*/
dataset: string;
/**
* The email for the service account with write access to your BigQuery dataset. If not provided, this will be pulled from a `FASTLY_BQ_EMAIL` environment variable
*/
email: string;
/**
* A unique name to identify this BigQuery logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to BigQuery. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The ID of your GCP project
*/
projectId: string;
/**
* The secret key associated with the service account that has write access to your BigQuery table. If not provided, this will be pulled from the `FASTLY_BQ_SECRET_KEY` environment variable. Typical format for this is a private key in a string with newlines
*/
secretKey: string;
/**
* The ID of your BigQuery table
*/
table: string;
/**
* BigQuery table name suffix template
*/
template?: string;
}
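// A sketch of a BigQuery logging endpoint on a Compute service, taking the
// credentials from Pulumi config instead of the FASTLY_BQ_* environment
// variables described above. The plural `loggingBigqueries` property name is
// an assumption based on how Pulumi pluralizes provider blocks.
import * as pulumi from "@pulumi/pulumi";

const cfg = new pulumi.Config();
const logged = new fastly.ServiceCompute("logged", {
    name: "logged-compute",
    domains: [{ name: "logged.example.com" }],
    package: { filename: "package.tar.gz" },     // placeholder
    loggingBigqueries: [{
        name: "bq",
        projectId: "my-gcp-project",             // placeholder
        dataset: "fastly_logs",
        table: "edge_requests",
        email: cfg.require("bqEmail"),           // service account email
        secretKey: cfg.requireSecret("bqSecretKey"),
    }],
});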
export interface ServiceComputeLoggingBlobstorage {
/**
* The unique Azure Blob Storage namespace in which your data objects are stored
*/
accountName: string;
/**
* The codec used for compression of your logs. Valid values are `zstd`, `snappy`, and `gzip`. If the specified codec is `gzip`, `gzipLevel` will default to 3. To specify a different level, leave `compressionCodec` blank and explicitly set the level using `gzipLevel`. Specifying both `compressionCodec` and `gzipLevel` in the same API request will result in an error.
*/
compressionCodec?: string;
/**
* The name of the Azure Blob Storage container in which to store logs
*/
container: string;
/**
* Maximum size of an uploaded log file, if non-zero.
*/
fileMaxBytes?: number;
/**
* Level of Gzip compression from `0-9`. `0` means no compression. `1` is the fastest and the least compressed version, `9` is the slowest and the most compressed version. Default `0`
*/
gzipLevel?: number;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* A unique name to identify the Azure Blob Storage endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* The path to upload logs to. Must end with a trailing slash. If this field is left empty, the files will be saved in the container's root path
*/
path?: string;
/**
* How frequently the logs should be transferred in seconds. Default `3600`
*/
period?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* A PGP public key that Fastly will use to encrypt your log files before writing them to disk
*/
publicKey?: string;
/**
* The Azure shared access signature providing write access to the blob service objects. Be sure to update your token before it expires or the logging functionality will not work
*/
sasToken: string;
/**
* The `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)
*/
timestampFormat?: string;
}
export interface ServiceComputeLoggingCloudfile {
/**
* Your Cloud File account access key
*/
accessKey: string;
/**
* The name of your Cloud Files container
*/
bucketName: string;
/**
* The codec used for compression of your logs. Valid values are `zstd`, `snappy`, and `gzip`. If the specified codec is `gzip`, `gzipLevel` will default to 3. To specify a different level, leave `compressionCodec` blank and explicitly set the level using `gzipLevel`. Specifying both `compressionCodec` and `gzipLevel` in the same API request will result in an error.
*/
compressionCodec?: string;
/**
* Level of Gzip compression from `0-9`. `0` means no compression. `1` is the fastest and the least compressed version, `9` is the slowest and the most compressed version. Default `0`
*/
gzipLevel?: number;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* The unique name of the Rackspace Cloud Files logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* The path to upload logs to
*/
path?: string;
/**
* How frequently log files are finalized so they can be available for reading (in seconds, default `3600`)
*/
period?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The PGP public key that Fastly will use to encrypt your log files before writing them to disk
*/
publicKey?: string;
/**
* The region to stream logs to. One of: DFW (Dallas), ORD (Chicago), IAD (Northern Virginia), LON (London), SYD (Sydney), HKG (Hong Kong)
*/
region?: string;
/**
* The `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)
*/
timestampFormat?: string;
/**
* The username for your Cloud Files account
*/
user: string;
}
export interface ServiceComputeLoggingDatadog {
/**
* The unique name of the Datadog logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The region that log data will be sent to. One of `US` or `EU`. Defaults to `US` if undefined
*/
region?: string;
/**
* The API key from your Datadog account
*/
token: string;
}
export interface ServiceComputeLoggingDigitalocean {
/**
* Your DigitalOcean Spaces account access key
*/
accessKey: string;
/**
* The name of the DigitalOcean Space
*/
bucketName: string;
/**
* The codec used for compression of your logs. Valid values are `zstd`, `snappy`, and `gzip`. If the specified codec is `gzip`, `gzipLevel` will default to 3. To specify a different level, leave `compressionCodec` blank and explicitly set the level using `gzipLevel`. Specifying both `compressionCodec` and `gzipLevel` in the same API request will result in an error.
*/
compressionCodec?: string;
/**
* The domain of the DigitalOcean Spaces endpoint (default `nyc3.digitaloceanspaces.com`)
*/
domain?: string;
/**
* Level of Gzip compression from `0-9`. `0` means no compression. `1` is the fastest and the least compressed version, `9` is the slowest and the most compressed version. Default `0`
*/
gzipLevel?: number;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* The unique name of the DigitalOcean Spaces logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* The path to upload logs to
*/
path?: string;
/**
* How frequently log files are finalized so they can be available for reading (in seconds, default `3600`)
*/
period?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* A PGP public key that Fastly will use to encrypt your log files before writing them to disk
*/
publicKey?: string;
/**
* Your DigitalOcean Spaces account secret key
*/
secretKey: string;
/**
* The `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)
*/
timestampFormat?: string;
}
export interface ServiceComputeLoggingElasticsearch {
/**
* The name of the Elasticsearch index to send documents (logs) to
*/
index: string;
/**
* The unique name of the Elasticsearch logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* BasicAuth password for Elasticsearch
*/
password?: string;
/**
* The ID of the Elasticsearch ingest pipeline to apply pre-process transformations to before indexing
*/
pipeline?: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The maximum number of bytes sent in one request. Defaults to `0` for unbounded
*/
requestMaxBytes?: number;
/**
* The maximum number of logs sent in one request. Defaults to `0` for unbounded
*/
requestMaxEntries?: number;
/**
* A secure certificate to authenticate the server with. Must be in PEM format
*/
tlsCaCert?: string;
/**
* The client certificate used to make authenticated requests. Must be in PEM format
*/
tlsClientCert?: string;
/**
* The client private key used to make authenticated requests. Must be in PEM format
*/
tlsClientKey?: string;
/**
* The hostname used to verify the server's certificate. It can either be the Common Name (CN) or a Subject Alternative Name (SAN)
*/
tlsHostname?: string;
/**
* The Elasticsearch URL to stream logs to
*/
url: string;
/**
* BasicAuth username for Elasticsearch
*/
user?: string;
}
export interface ServiceComputeLoggingFtp {
/**
* The FTP address to stream logs to
*/
address: string;
/**
* The codec used for compression of your logs. Valid values are `zstd`, `snappy`, and `gzip`. If the specified codec is `gzip`, `gzipLevel` will default to 3. To specify a different level, leave `compressionCodec` blank and explicitly set the level using `gzipLevel`. Specifying both `compressionCodec` and `gzipLevel` in the same API request will result in an error.
*/
compressionCodec?: string;
/**
* Level of Gzip compression from `0-9`. `0` means no compression. `1` is the fastest and the least compressed version, `9` is the slowest and the most compressed version. Default `0`
*/
gzipLevel?: number;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* The unique name of the FTP logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* The password for the server (for anonymous use, supply an email address)
*/
password: string;
/**
* The path to upload log files to. If the path ends in `/` then it is treated as a directory
*/
path: string;
/**
* How frequently the logs should be transferred, in seconds (Default `3600`)
*/
period?: number;
/**
* The port number. Default: `21`
*/
port?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The PGP public key that Fastly will use to encrypt your log files before writing them to disk
*/
publicKey?: string;
/**
* The `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)
*/
timestampFormat?: string;
/**
* The username for the server (can be `anonymous`)
*/
user: string;
}
export interface ServiceComputeLoggingGc {
/**
* The google account name used to obtain temporary credentials (default none). You may optionally provide this via an environment variable, `FASTLY_GCS_ACCOUNT_NAME`.
*/
accountName?: string;
/**
* The name of the bucket in which to store the logs
*/
bucketName: string;
/**
* The codec used for compression of your logs. Valid values are `zstd`, `snappy`, and `gzip`. If the specified codec is `gzip`, `gzipLevel` will default to 3. To specify a different level, leave `compressionCodec` blank and explicitly set the level using `gzipLevel`. Specifying both `compressionCodec` and `gzipLevel` in the same API request will result in an error.
*/
compressionCodec?: string;
/**
* Level of Gzip compression from `0-9`. `0` means no compression. `1` is the fastest and the least compressed version, `9` is the slowest and the most compressed version. Default `0`
*/
gzipLevel?: number;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* A unique name to identify this GCS endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Path to store the files. Must end with a trailing slash. If this field is left empty, the files will be saved in the bucket's root path
*/
path?: string;
/**
* How frequently the logs should be transferred, in seconds (default `3600`)
*/
period?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The ID of your Google Cloud Platform project
*/
projectId?: string;
/**
* The secret key associated with the target gcs bucket on your account. You may optionally provide this secret via an environment variable, `FASTLY_GCS_SECRET_KEY`. A typical format for the key is PEM format, containing actual newline characters where required
*/
secretKey?: string;
/**
* The `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)
*/
timestampFormat?: string;
/**
* Your Google Cloud Platform service account email address. The `clientEmail` field in your service account authentication JSON. You may optionally provide this via an environment variable, `FASTLY_GCS_EMAIL`.
*/
user?: string;
}
export interface ServiceComputeLoggingGooglepubsub {
/**
* The google account name used to obtain temporary credentials (default none). You may optionally provide this via an environment variable, `FASTLY_GCS_ACCOUNT_NAME`.
*/
accountName?: string;
/**
* The unique name of the Google Cloud Pub/Sub logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The ID of your Google Cloud Platform project
*/
projectId: string;
/**
* Your Google Cloud Platform account secret key. The `privateKey` field in your service account authentication JSON. You may optionally provide this secret via an environment variable, `FASTLY_GOOGLE_PUBSUB_SECRET_KEY`.
*/
secretKey: string;
/**
* The Google Cloud Pub/Sub topic to which logs will be published
*/
topic: string;
/**
* Your Google Cloud Platform service account email address. The `clientEmail` field in your service account authentication JSON. You may optionally provide this via an environment variable, `FASTLY_GOOGLE_PUBSUB_EMAIL`.
*/
user: string;
}
export interface ServiceComputeLoggingGrafanacloudlog {
/**
* The stream identifier as a JSON string
*/
index: string;
/**
* The unique name of the GrafanaCloudLogs logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The Access Policy Token key for your GrafanaCloudLogs account
*/
token: string;
/**
* The URL to stream logs to
*/
url: string;
/**
* The Grafana User ID
*/
user: string;
}
export interface ServiceComputeLoggingHeroku {
/**
* The unique name of the Heroku logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The token to use for authentication (https://devcenter.heroku.com/articles/add-on-partner-log-integration)
*/
token: string;
/**
* The URL to stream logs to
*/
url: string;
}
export interface ServiceComputeLoggingHoneycomb {
/**
* The Honeycomb Dataset you want to log to
*/
dataset: string;
/**
* The unique name of the Honeycomb logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The Write Key from the Account page of your Honeycomb account
*/
token: string;
}
export interface ServiceComputeLoggingHttp {
/**
* Value of the `Content-Type` header sent with the request
*/
contentType?: string;
/**
* Custom header sent with the request
*/
headerName?: string;
/**
* Value of the custom header sent with the request
*/
headerValue?: string;
/**
* Formats log entries as JSON. Can be either disabled (`0`), array of JSON (`1`), or newline-delimited JSON (`2`)
*/
jsonFormat?: string;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* HTTP method used for request. Can be either `POST` or `PUT`. Default `POST`
*/
method?: string;
/**
* The unique name of the HTTPS logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The maximum number of bytes sent in one request
*/
requestMaxBytes?: number;
/**
* The maximum number of logs sent in one request
*/
requestMaxEntries?: number;
/**
* A secure certificate to authenticate the server with. Must be in PEM format
*/
tlsCaCert?: string;
/**
* The client certificate used to make authenticated requests. Must be in PEM format
*/
tlsClientCert?: string;
/**
* The client private key used to make authenticated requests. Must be in PEM format
*/
tlsClientKey?: string;
/**
* Used during the TLS handshake to validate the certificate
*/
tlsHostname?: string;
/**
* URL that log data will be sent to. Must use the https protocol
*/
url: string;
}
export interface ServiceComputeLoggingKafka {
/**
* SASL authentication method. One of: `plain`, `scram-sha-256`, `scram-sha-512`
*/
authMethod?: string;
/**
* A comma-separated list of IP addresses or hostnames of Kafka brokers
*/
brokers: string;
/**
* The codec used for compression of your logs. One of: `gzip`, `snappy`, `lz4`
*/
compressionCodec?: string;
/**
* The unique name of the Kafka logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Enables parsing of key=value tuples from the beginning of a logline, turning them into record headers
*/
parseLogKeyvals?: boolean;
/**
* SASL password
*/
password?: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* Maximum size of log batch, if non-zero. Defaults to 0 for unbounded
*/
requestMaxBytes?: number;
/**
* The number of acknowledgements a leader must receive before a write is considered successful. One of: `1` (default, one server needs to respond), `0` (no servers need to respond), `-1` (wait for all in-sync replicas to respond)
*/
requiredAcks?: string;
/**
* A secure certificate to authenticate the server with. Must be in PEM format
*/
tlsCaCert?: string;
/**
* The client certificate used to make authenticated requests. Must be in PEM format
*/
tlsClientCert?: string;
/**
* The client private key used to make authenticated requests. Must be in PEM format
*/
tlsClientKey?: string;
/**
* The hostname used to verify the server's certificate. It can either be the Common Name or a Subject Alternative Name (SAN)
*/
tlsHostname?: string;
/**
* The Kafka topic to send logs to
*/
topic: string;
/**
* Whether to use TLS for secure logging. Can be either `true` or `false`
*/
useTls?: boolean;
/**
* SASL username
*/
user?: string;
}
export interface ServiceComputeLoggingKinese {
/**
* The AWS access key to be used to write to the stream
*/
accessKey?: string;
/**
* The Amazon Resource Name (ARN) for the IAM role granting Fastly access to Kinesis. Not required if `accessKey` and `secretKey` are provided.
*/
iamRole?: string;
/**
* The unique name of the Kinesis logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The AWS region the stream resides in. (Default: `us-east-1`)
*/
region?: string;
/**
* The AWS secret access key to authenticate with
*/
secretKey?: string;
/**
* The Kinesis stream name
*/
topic: string;
}
export interface ServiceComputeLoggingLogentry {
/**
* The unique name of the Logentries logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* The port number configured in Logentries
*/
port?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* Use token based authentication (https://logentries.com/doc/input-token/)
*/
token: string;
/**
* Whether to use TLS for secure logging
*/
useTls?: boolean;
}
export interface ServiceComputeLoggingLoggly {
/**
* The unique name of the Loggly logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The token to use for authentication (https://www.loggly.com/docs/customer-token-authentication-token/).
*/
token: string;
}
export interface ServiceComputeLoggingLogshuttle {
/**
* The unique name of the Log Shuttle logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The data authentication token associated with this endpoint
*/
token: string;
/**
* Your Log Shuttle endpoint URL
*/
url: string;
}
export interface ServiceComputeLoggingNewrelic {
/**
* The unique name of the New Relic logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The region that log data will be sent to. Default: `US`
*/
region?: string;
/**
* The Insert API key from the Account page of your New Relic account
*/
token: string;
}
export interface ServiceComputeLoggingOpenstack {
/**
* Your OpenStack account access key
*/
accessKey: string;
/**
* The name of your OpenStack container
*/
bucketName: string;
/**
* The codec used for compression of your logs. Valid values are `zstd`, `snappy`, and `gzip`. If the specified codec is `gzip`, `gzipLevel` will default to 3. To specify a different level, leave `compressionCodec` blank and explicitly set the level using `gzipLevel`. Specifying both `compressionCodec` and `gzipLevel` in the same API request will result in an error.
*/
compressionCodec?: string;
/**
* Level of Gzip compression from `0-9`. `0` means no compression. `1` is the fastest and the least compressed version, `9` is the slowest and the most compressed version. Default `0`
*/
gzipLevel?: number;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* The unique name of the OpenStack logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Path to store the files. Must end with a trailing slash. If this field is left empty, the files will be saved in the bucket's root path
*/
path?: string;
/**
* How frequently the logs should be transferred, in seconds. Default `3600`
*/
period?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* A PGP public key that Fastly will use to encrypt your log files before writing them to disk
*/
publicKey?: string;
/**
* The `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)
*/
timestampFormat?: string;
/**
* Your OpenStack auth url
*/
url: string;
/**
* The username for your OpenStack account
*/
user: string;
}
export interface ServiceComputeLoggingPapertrail {
/**
* The address of the Papertrail endpoint
*/
address: string;
/**
* A unique name to identify this Papertrail endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* The port associated with the address where the Papertrail endpoint can be accessed
*/
port: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
}
export interface ServiceComputeLoggingS3 {
/**
* The AWS [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) to use for objects uploaded to the S3 bucket. Options are: `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, `bucket-owner-full-control`
*/
acl?: string;
/**
* The name of the bucket in which to store the logs
*/
bucketName: string;
/**
* The codec used for compression of your logs. Valid values are `zstd`, `snappy`, and `gzip`. If the specified codec is `gzip`, `gzipLevel` will default to 3. To specify a different level, leave `compressionCodec` blank and explicitly set the level using `gzipLevel`. Specifying both `compressionCodec` and `gzipLevel` in the same API request will result in an error.
*/
compressionCodec?: string;
/**
* If you created the S3 bucket outside of `us-east-1`, then specify the corresponding bucket endpoint. Example: `s3-us-west-2.amazonaws.com`
*/
domain?: string;
/**
* Maximum size of an uploaded log file, if non-zero.
*/
fileMaxBytes?: number;
/**
* Level of Gzip compression from `0-9`. `0` means no compression. `1` is the fastest and the least compressed version, `9` is the slowest and the most compressed version. Default `0`
*/
gzipLevel?: number;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* The unique name of the S3 logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Path to store the files. Must end with a trailing slash. If this field is left empty, the files will be saved in the bucket's root path
*/
path?: string;
/**
* How frequently the logs should be transferred, in seconds. Default `3600`
*/
period?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* A PGP public key that Fastly will use to encrypt your log files before writing them to disk
*/
publicKey?: string;
/**
* The S3 storage class (redundancy level). Should be one of: `standard`, `intelligentTiering`, `standardIa`, `onezoneIa`, `glacier`, `glacierIr`, `deepArchive`, or `reducedRedundancy`
*/
redundancy?: string;
/**
* AWS Access Key of an account with the required permissions to post logs. It is **strongly** recommended you create a separate IAM user with permissions limited to this bucket. This key will not be encrypted. Not required if `s3IamRole` is provided. You can provide this key via an environment variable, `FASTLY_S3_ACCESS_KEY`
*/
s3AccessKey?: string;
/**
* The Amazon Resource Name (ARN) for the IAM role granting Fastly access to S3. Not required if `s3AccessKey` and `s3SecretKey` are provided. You can provide this value via an environment variable, `FASTLY_S3_IAM_ROLE`
*/
s3IamRole?: string;
/**
* AWS Secret Key of an account with the required permissions to post logs. It is **strongly** recommended you create a separate IAM user with permissions limited to this bucket. This secret will not be encrypted. Not required if `s3IamRole` is provided. You can provide this secret via an environment variable, `FASTLY_S3_SECRET_KEY`
*/
s3SecretKey?: string;
/**
* Specify what type of server side encryption should be used. Can be either `AES256` or `aws:kms`
*/
serverSideEncryption?: string;
/**
* Optional server-side KMS key ID. Must be set if `serverSideEncryption` is set to `aws:kms`
*/
serverSideEncryptionKmsKeyId?: string;
/**
* The `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)
*/
timestampFormat?: string;
}
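// A sketch of an S3 logging block that authenticates with an IAM role
// instead of access keys, per the `s3IamRole` note above. Attaching it via a
// plural `loggingS3s` property on the service resource is an assumption
// based on how Pulumi pluralizes provider blocks.
const s3Logs = {
    name: "s3",
    bucketName: "my-log-bucket",               // placeholder
    domain: "s3-us-west-2.amazonaws.com",      // bucket lives outside us-east-1
    path: "fastly/",                           // must end with a trailing slash
    s3IamRole: "arn:aws:iam::123456789012:role/fastly-logging", // placeholder ARN
    compressionCodec: "zstd",                  // mutually exclusive with gzipLevel
    serverSideEncryption: "aws:kms",
    serverSideEncryptionKmsKeyId: "alias/fastly-logs", // required with aws:kms
};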
export interface ServiceComputeLoggingScalyr {
/**
* The unique name of the Scalyr logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* The name of the logfile field sent to Scalyr
*/
projectId?: string;
/**
* The region that log data will be sent to. One of `US` or `EU`. Defaults to `US` if undefined
*/
region?: string;
/**
* The token to use for authentication (https://www.scalyr.com/keys)
*/
token: string;
}
export interface ServiceComputeLoggingSftp {
/**
* The SFTP address to stream logs to
*/
address: string;
/**
* The codec used for compression of your logs. Valid values are `zstd`, `snappy`, and `gzip`. If the specified codec is `gzip`, `gzipLevel` will default to 3. To specify a different level, leave `compressionCodec` blank and explicitly set the level using `gzipLevel`. Specifying both `compressionCodec` and `gzipLevel` in the same API request will result in an error.
*/
compressionCodec?: string;
/**
* Level of Gzip compression from `0-9`. `0` means no compression. `1` is the fastest and the least compressed version, `9` is the slowest and the most compressed version. Default `0`
*/
gzipLevel?: number;
/**
* How the message should be formatted. Can be either `classic`, `loggly`, `logplex` or `blank`. Default is `classic`
*/
messageType?: string;
/**
* The unique name of the SFTP logging endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* The password for the server. If both `password` and `secretKey` are passed, `secretKey` will be preferred
*/
password?: string;
/**
* The path to upload log files to. If the path ends in `/` then it is treated as a directory
*/
path: string;
/**
* How frequently log files are finalized so they can be available for reading (in seconds, default `3600`)
*/
period?: number;
/**
* The port the SFTP service listens on. (Default: `22`)
*/
port?: number;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* A PGP public key that Fastly will use to encrypt your log files before writing them to disk
*/
publicKey?: string;
/**
* The SSH private key for the server. If both `password` and `secretKey` are passed, `secretKey` will be preferred
*/
secretKey?: string;
/**
* A list of host keys for all hosts we can connect to over SFTP
*/
sshKnownHosts: string;
/**
* The `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)
*/
timestampFormat?: string;
/**
* The username for the server
*/
user: string;
}
export interface ServiceComputeLoggingSplunk {
/**
* A unique name to identify the Splunk endpoint. It is important to note that changing this attribute will delete and recreate the resource
*/
name: string;
/**
* Region where logs will be processed before streaming to this endpoint. Valid values are `none`, `us`, and `eu`.
*/
processingRegion?: string;
/**
* A secure certificate to authenticate the server with. Must be in PEM format. You can provide this certificate via an environment variable, `FASTLY_SPLUNK_CA_CERT`
*/
tlsCaCert?: string;
/**
* The client certificate used to make authenticated requests. Must be in PEM format.
*/
tlsClientCert?: string;
/**
* The client private key used to make authenticated requests. Must be in PEM format.
*/
tlsClientKey?: string;
/**
* The hostname used to verify the server's certificate. It can either be the Common Name or a Subject Alternative Name (SAN)
*/
tlsHostname?: string;
/**
* The Splunk token to be used for authentication
*/
token: string;
/**
* The