UNPKG

effect

Version:

The missing standard library for TypeScript, for writing production-grade software.

1,459 lines (1,416 loc) 357 kB
/** * @since 2.0.0 */ import type * as Cause from "./Cause.js" import type * as Channel from "./Channel.js" import type * as Chunk from "./Chunk.js" import type * as Context from "./Context.js" import type * as Deferred from "./Deferred.js" import type * as Duration from "./Duration.js" import type * as Effect from "./Effect.js" import type * as Either from "./Either.js" import type { ExecutionPlan } from "./ExecutionPlan.js" import type * as Exit from "./Exit.js" import type { LazyArg } from "./Function.js" import type * as GroupBy from "./GroupBy.js" import type { TypeLambda } from "./HKT.js" import * as groupBy_ from "./internal/groupBy.js" import * as internal from "./internal/stream.js" import type * as Layer from "./Layer.js" import type * as Option from "./Option.js" import type * as Order from "./Order.js" import type { Pipeable } from "./Pipeable.js" import type { Predicate, Refinement } from "./Predicate.js" import type * as PubSub from "./PubSub.js" import type * as Queue from "./Queue.js" import type { Runtime } from "./Runtime.js" import type * as Schedule from "./Schedule.js" import type * as Scope from "./Scope.js" import type * as Sink from "./Sink.js" import type * as Emit from "./StreamEmit.js" import type * as HaltStrategy from "./StreamHaltStrategy.js" import type * as Take from "./Take.js" import type { TPubSub } from "./TPubSub.js" import type { TDequeue } from "./TQueue.js" import type * as Tracer from "./Tracer.js" import type { Covariant, NoInfer, TupleOf } from "./Types.js" import type * as Unify from "./Unify.js" /** * @since 2.0.0 * @category symbols */ export const StreamTypeId: unique symbol = internal.StreamTypeId /** * @since 2.0.0 * @category symbols */ export type StreamTypeId = typeof StreamTypeId /** * A `Stream<A, E, R>` is a description of a program that, when evaluated, may * emit zero or more values of type `A`, may fail with errors of type `E`, and * uses a context of type `R`. 
One way to think of `Stream` is as an * `Effect` program that could emit multiple values. * * `Stream` is a purely functional *pull* based stream. Pull based streams offer * inherent laziness and backpressure, relieving users of the need to manage * buffers between operators. As an optimization, `Stream` does not emit * single values, but rather an array of values. This allows the cost of effect * evaluation to be amortized. * * `Stream` forms a monad on its `A` type parameter, and has error management * facilities for its `E` type parameter, modeled similarly to `Effect` (with * some adjustments for the multiple-valued nature of `Stream`). These aspects * allow for rich and expressive composition of streams. * * @since 2.0.0 * @category models */ export interface Stream<out A, out E = never, out R = never> extends Stream.Variance<A, E, R>, Pipeable { [Unify.typeSymbol]?: unknown [Unify.unifySymbol]?: StreamUnify<this> [Unify.ignoreSymbol]?: StreamUnifyIgnore } /** * @since 2.0.0 * @category models */ export interface StreamUnify<A extends { [Unify.typeSymbol]?: any }> extends Effect.EffectUnify<A> { Stream?: () => A[Unify.typeSymbol] extends Stream<infer A0, infer E0, infer R0> | infer _ ? 
Stream<A0, E0, R0> : never } /** * @category models * @since 2.0.0 */ export interface StreamUnifyIgnore extends Effect.EffectUnifyIgnore { Effect?: true } /** * @since 2.0.0 * @category models */ declare module "./Effect.js" { interface Effect<A, E, R> extends Stream<A, E, R> {} } /** * @category type lambdas * @since 2.0.0 */ export interface StreamTypeLambda extends TypeLambda { readonly type: Stream<this["Target"], this["Out1"], this["Out2"]> } /** * @since 2.0.0 */ export declare namespace Stream { /** * @since 2.0.0 * @category models */ export interface Variance<out A, out E, out R> { readonly [StreamTypeId]: VarianceStruct<A, E, R> } /** * @since 3.4.0 * @category models */ export interface VarianceStruct<out A, out E, out R> { readonly _A: Covariant<A> readonly _E: Covariant<E> readonly _R: Covariant<R> } /** * @since 3.4.0 * @category type-level */ export type Success<T extends Stream<any, any, any>> = [T] extends [Stream<infer _A, infer _E, infer _R>] ? _A : never /** * @since 3.4.0 * @category type-level */ export type Error<T extends Stream<any, any, any>> = [T] extends [Stream<infer _A, infer _E, infer _R>] ? _E : never /** * @since 3.4.0 * @category type-level */ export type Context<T extends Stream<any, any, any>> = [T] extends [Stream<infer _A, infer _E, infer _R>] ? _R : never /** * @since 2.0.0 * @category models * @deprecated use Types.TupleOf instead */ export type DynamicTuple<T, N extends number> = N extends N ? number extends N ? Array<T> : DynamicTupleOf<T, N, []> : never /** * @since 2.0.0 * @category models * @deprecated use Types.TupleOf instead */ export type DynamicTupleOf<T, N extends number, R extends Array<unknown>> = R["length"] extends N ? R : DynamicTupleOf<T, N, [T, ...R]> } /** * The default chunk size used by the various combinators and constructors of * `Stream`. 
* * @since 2.0.0 * @category constants */ export const DefaultChunkSize: number = internal.DefaultChunkSize /** * Collects each underlying Chunk of the stream into a new chunk, and emits it * on each pull. * * @since 2.0.0 * @category utils */ export const accumulate: <A, E, R>(self: Stream<A, E, R>) => Stream<Chunk.Chunk<A>, E, R> = internal.accumulate /** * Re-chunks the elements of the stream by accumulating each underlying chunk. * * @since 2.0.0 * @category utils */ export const accumulateChunks: <A, E, R>(self: Stream<A, E, R>) => Stream<A, E, R> = internal.accumulateChunks /** * Creates a stream from a single value that will get cleaned up after the * stream is consumed. * * @example * ```ts * import { Console, Effect, Stream } from "effect" * * // Simulating File operations * const open = (filename: string) => * Effect.gen(function*() { * yield* Console.log(`Opening ${filename}`) * return { * getLines: Effect.succeed(["Line 1", "Line 2", "Line 3"]), * close: Console.log(`Closing ${filename}`) * } * }) * * const stream = Stream.acquireRelease( * open("file.txt"), * (file) => file.close * ).pipe(Stream.flatMap((file) => file.getLines)) * * Effect.runPromise(Stream.runCollect(stream)).then(console.log) * // Opening file.txt * // Closing file.txt * // { _id: 'Chunk', values: [ [ 'Line 1', 'Line 2', 'Line 3' ] ] } * ``` * * @since 2.0.0 * @category constructors */ export const acquireRelease: <A, E, R, R2, X>( acquire: Effect.Effect<A, E, R>, release: (resource: A, exit: Exit.Exit<unknown, unknown>) => Effect.Effect<X, never, R2> ) => Stream<A, E, R | R2> = internal.acquireRelease /** * Aggregates elements of this stream using the provided sink for as long as * the downstream operators on the stream are busy. * * This operator divides the stream into two asynchronous "islands". Operators * upstream of this operator run on one fiber, while downstream operators run * on another. 
Whenever the downstream fiber is busy processing elements, the * upstream fiber will feed elements into the sink until it signals * completion. * * Any sink can be used here, but see `Sink.foldWeightedEffect` and * `Sink.foldUntilEffect` for sinks that cover the common use cases. * * @since 2.0.0 * @category utils */ export const aggregate: { /** * Aggregates elements of this stream using the provided sink for as long as * the downstream operators on the stream are busy. * * This operator divides the stream into two asynchronous "islands". Operators * upstream of this operator run on one fiber, while downstream operators run * on another. Whenever the downstream fiber is busy processing elements, the * upstream fiber will feed elements into the sink until it signals * completion. * * Any sink can be used here, but see `Sink.foldWeightedEffect` and * `Sink.foldUntilEffect` for sinks that cover the common use cases. * * @since 2.0.0 * @category utils */ <B, A, A2, E2, R2>(sink: Sink.Sink<B, A | A2, A2, E2, R2>): <E, R>(self: Stream<A, E, R>) => Stream<B, E2 | E, R2 | R> /** * Aggregates elements of this stream using the provided sink for as long as * the downstream operators on the stream are busy. * * This operator divides the stream into two asynchronous "islands". Operators * upstream of this operator run on one fiber, while downstream operators run * on another. Whenever the downstream fiber is busy processing elements, the * upstream fiber will feed elements into the sink until it signals * completion. * * Any sink can be used here, but see `Sink.foldWeightedEffect` and * `Sink.foldUntilEffect` for sinks that cover the common use cases. * * @since 2.0.0 * @category utils */ <A, E, R, B, A2, E2, R2>(self: Stream<A, E, R>, sink: Sink.Sink<B, A | A2, A2, E2, R2>): Stream<B, E | E2, R | R2> } = internal.aggregate /** * Like {@link aggregateWithinEither}, but only returns the `Right` results. 
* * @since 2.0.0 * @category utils */ export const aggregateWithin: { /** * Like {@link aggregateWithinEither}, but only returns the `Right` results. * * @since 2.0.0 * @category utils */ <B, A, A2, E2, R2, C, R3>( sink: Sink.Sink<B, A | A2, A2, E2, R2>, schedule: Schedule.Schedule<C, Option.Option<B>, R3> ): <E, R>(self: Stream<A, E, R>) => Stream<B, E2 | E, R2 | R3 | R> /** * Like {@link aggregateWithinEither}, but only returns the `Right` results. * * @since 2.0.0 * @category utils */ <A, E, R, B, A2, E2, R2, C, R3>( self: Stream<A, E, R>, sink: Sink.Sink<B, A | A2, A2, E2, R2>, schedule: Schedule.Schedule<C, Option.Option<B>, R3> ): Stream<B, E | E2, R | R2 | R3> } = internal.aggregateWithin /** * Aggregates elements using the provided sink until it completes, or until * the delay signaled by the schedule has passed. * * This operator divides the stream into two asynchronous islands. Operators * upstream of this operator run on one fiber, while downstream operators run * on another. Elements will be aggregated by the sink until the downstream * fiber pulls the aggregated value, or until the schedule's delay has passed. * * Aggregated elements will be fed into the schedule to determine the delays * between pulls. * * @since 2.0.0 * @category utils */ export const aggregateWithinEither: { /** * Aggregates elements using the provided sink until it completes, or until * the delay signaled by the schedule has passed. * * This operator divides the stream into two asynchronous islands. Operators * upstream of this operator run on one fiber, while downstream operators run * on another. Elements will be aggregated by the sink until the downstream * fiber pulls the aggregated value, or until the schedule's delay has passed. * * Aggregated elements will be fed into the schedule to determine the delays * between pulls. 
* * @since 2.0.0 * @category utils */ <B, A, A2, E2, R2, C, R3>( sink: Sink.Sink<B, A | A2, A2, E2, R2>, schedule: Schedule.Schedule<C, Option.Option<B>, R3> ): <E, R>(self: Stream<A, E, R>) => Stream<Either.Either<B, C>, E2 | E, R2 | R3 | R> /** * Aggregates elements using the provided sink until it completes, or until * the delay signaled by the schedule has passed. * * This operator divides the stream into two asynchronous islands. Operators * upstream of this operator run on one fiber, while downstream operators run * on another. Elements will be aggregated by the sink until the downstream * fiber pulls the aggregated value, or until the schedule's delay has passed. * * Aggregated elements will be fed into the schedule to determine the delays * between pulls. * * @since 2.0.0 * @category utils */ <A, E, R, B, A2, E2, R2, C, R3>( self: Stream<A, E, R>, sink: Sink.Sink<B, A | A2, A2, E2, R2>, schedule: Schedule.Schedule<C, Option.Option<B>, R3> ): Stream<Either.Either<B, C>, E | E2, R | R2 | R3> } = internal.aggregateWithinEither /** * Maps the success values of this stream to the specified constant value. * * @example * ```ts * import { Effect, Stream } from "effect" * * const stream = Stream.range(1, 5).pipe(Stream.as(null)) * * Effect.runPromise(Stream.runCollect(stream)).then(console.log) * // { _id: 'Chunk', values: [ null, null, null, null, null ] } * ``` * * @since 2.0.0 * @category mapping */ export const as: { /** * Maps the success values of this stream to the specified constant value. * * @example * ```ts * import { Effect, Stream } from "effect" * * const stream = Stream.range(1, 5).pipe(Stream.as(null)) * * Effect.runPromise(Stream.runCollect(stream)).then(console.log) * // { _id: 'Chunk', values: [ null, null, null, null, null ] } * ``` * * @since 2.0.0 * @category mapping */ <B>(value: B): <A, E, R>(self: Stream<A, E, R>) => Stream<B, E, R> /** * Maps the success values of this stream to the specified constant value. 
* * @example * ```ts * import { Effect, Stream } from "effect" * * const stream = Stream.range(1, 5).pipe(Stream.as(null)) * * Effect.runPromise(Stream.runCollect(stream)).then(console.log) * // { _id: 'Chunk', values: [ null, null, null, null, null ] } * ``` * * @since 2.0.0 * @category mapping */ <A, E, R, B>(self: Stream<A, E, R>, value: B): Stream<B, E, R> } = internal.as const _async: <A, E = never, R = never>( register: (emit: Emit.Emit<R, E, A, void>) => Effect.Effect<void, never, R> | void, bufferSize?: number | "unbounded" | { readonly bufferSize?: number | undefined readonly strategy?: "dropping" | "sliding" | "suspend" | undefined } | undefined ) => Stream<A, E, R> = internal._async export { /** * Creates a stream from an asynchronous callback that can be called multiple * times. The optionality of the error type `E` in `Emit` can be used to * signal the end of the stream by setting it to `None`. * * The registration function can optionally return an `Effect`, which will be * executed if the `Fiber` executing this Effect is interrupted. * * @example * ```ts * import type { StreamEmit } from "effect" * import { Chunk, Effect, Option, Stream } from "effect" * * const events = [1, 2, 3, 4] * * const stream = Stream.async( * (emit: StreamEmit.Emit<never, never, number, void>) => { * events.forEach((n) => { * setTimeout(() => { * if (n === 3) { * emit(Effect.fail(Option.none())) // Terminate the stream * } else { * emit(Effect.succeed(Chunk.of(n))) // Add the current item to the stream * } * }, 100 * n) * }) * } * ) * * Effect.runPromise(Stream.runCollect(stream)).then(console.log) * // { _id: 'Chunk', values: [ 1, 2 ] } * * ``` * @since 2.0.0 * @category constructors */ _async as async } /** * Creates a stream from an asynchronous callback that can be called multiple * times. The registration of the callback itself returns an effect. The * optionality of the error type `E` can be used to signal the end of the * stream, by setting it to `None`. 
* * @since 2.0.0 * @category constructors */ export const asyncEffect: <A, E = never, R = never>( register: (emit: Emit.Emit<R, E, A, void>) => Effect.Effect<unknown, E, R>, bufferSize?: number | "unbounded" | { readonly bufferSize?: number | undefined readonly strategy?: "dropping" | "sliding" | "suspend" | undefined } | undefined ) => Stream<A, E, R> = internal.asyncEffect /** * Creates a stream from an external push-based resource. * * You can use the `emit` helper to emit values to the stream. The `emit` helper * returns a boolean indicating whether the value was emitted or not. * * You can also use the `emit` helper to signal the end of the stream by * using apis such as `emit.end` or `emit.fail`. * * By default it uses an "unbounded" buffer size. * You can customize the buffer size and strategy by passing an object as the * second argument with the `bufferSize` and `strategy` fields. * * @example * ```ts * import { Effect, Stream } from "effect" * * Stream.asyncPush<string>((emit) => * Effect.acquireRelease( * Effect.gen(function*() { * yield* Effect.log("subscribing") * return setInterval(() => emit.single("tick"), 1000) * }), * (handle) => * Effect.gen(function*() { * yield* Effect.log("unsubscribing") * clearInterval(handle) * }) * ), { bufferSize: 16, strategy: "dropping" }) * ``` * * @since 3.6.0 * @category constructors */ export const asyncPush: <A, E = never, R = never>( register: (emit: Emit.EmitOpsPush<E, A>) => Effect.Effect<unknown, E, R | Scope.Scope>, options?: { readonly bufferSize: "unbounded" } | { readonly bufferSize?: number | undefined readonly strategy?: "dropping" | "sliding" | undefined } | undefined ) => Stream<A, E, Exclude<R, Scope.Scope>> = internal.asyncPush /** * Creates a stream from an asynchronous callback that can be called multiple * times. The registration of the callback itself returns a scoped * resource. The optionality of the error type `E` can be used to signal the * end of the stream, by setting it to `None`. 
* * @since 2.0.0 * @category constructors */ export const asyncScoped: <A, E = never, R = never>( register: (emit: Emit.Emit<R, E, A, void>) => Effect.Effect<unknown, E, R | Scope.Scope>, bufferSize?: number | "unbounded" | { readonly bufferSize?: number | undefined readonly strategy?: "dropping" | "sliding" | "suspend" | undefined } | undefined ) => Stream<A, E, Exclude<R, Scope.Scope>> = internal.asyncScoped /** * Returns a `Stream` that first collects `n` elements from the input `Stream`, * and then creates a new `Stream` using the specified function, and sends all * the following elements through that stream. * * @since 2.0.0 * @category sequencing */ export const branchAfter: { /** * Returns a `Stream` that first collects `n` elements from the input `Stream`, * and then creates a new `Stream` using the specified function, and sends all * the following elements through that stream. * * @since 2.0.0 * @category sequencing */ <A, A2, E2, R2>(n: number, f: (input: Chunk.Chunk<A>) => Stream<A2, E2, R2>): <E, R>(self: Stream<A, E, R>) => Stream<A2, E2 | E, R2 | R> /** * Returns a `Stream` that first collects `n` elements from the input `Stream`, * and then creates a new `Stream` using the specified function, and sends all * the following elements through that stream. * * @since 2.0.0 * @category sequencing */ <A, E, R, A2, E2, R2>( self: Stream<A, E, R>, n: number, f: (input: Chunk.Chunk<A>) => Stream<A2, E2, R2> ): Stream<A2, E | E2, R | R2> } = internal.branchAfter /** * Fan out the stream, producing a tuple of streams that have the same elements * as this stream. The driver stream will only ever advance the `maximumLag` * chunks before the slowest downstream stream. 
* * @example * ```ts * import { Console, Effect, Fiber, Schedule, Stream } from "effect" * * const numbers = Effect.scoped( * Stream.range(1, 20).pipe( * Stream.tap((n) => Console.log(`Emit ${n} element before broadcasting`)), * Stream.broadcast(2, 5), * Stream.flatMap(([first, second]) => * Effect.gen(function*() { * const fiber1 = yield* Stream.runFold(first, 0, (acc, e) => Math.max(acc, e)).pipe( * Effect.andThen((max) => Console.log(`Maximum: ${max}`)), * Effect.fork * ) * const fiber2 = yield* second.pipe( * Stream.schedule(Schedule.spaced("1 second")), * Stream.runForEach((n) => Console.log(`Logging to the Console: ${n}`)), * Effect.fork * ) * yield* Fiber.join(fiber1).pipe( * Effect.zip(Fiber.join(fiber2), { concurrent: true }) * ) * }) * ), * Stream.runCollect * ) * ) * * Effect.runPromise(numbers).then(console.log) * // Emit 1 element before broadcasting * // Emit 2 element before broadcasting * // Emit 3 element before broadcasting * // Emit 4 element before broadcasting * // Emit 5 element before broadcasting * // Emit 6 element before broadcasting * // Emit 7 element before broadcasting * // Emit 8 element before broadcasting * // Emit 9 element before broadcasting * // Emit 10 element before broadcasting * // Emit 11 element before broadcasting * // Logging to the Console: 1 * // Logging to the Console: 2 * // Logging to the Console: 3 * // Logging to the Console: 4 * // Logging to the Console: 5 * // Emit 12 element before broadcasting * // Emit 13 element before broadcasting * // Emit 14 element before broadcasting * // Emit 15 element before broadcasting * // Emit 16 element before broadcasting * // Logging to the Console: 6 * // Logging to the Console: 7 * // Logging to the Console: 8 * // Logging to the Console: 9 * // Logging to the Console: 10 * // Emit 17 element before broadcasting * // Emit 18 element before broadcasting * // Emit 19 element before broadcasting * // Emit 20 element before broadcasting * // Logging to the Console: 11 * // 
Logging to the Console: 12 * // Logging to the Console: 13 * // Logging to the Console: 14 * // Logging to the Console: 15 * // Maximum: 20 * // Logging to the Console: 16 * // Logging to the Console: 17 * // Logging to the Console: 18 * // Logging to the Console: 19 * // Logging to the Console: 20 * // { _id: 'Chunk', values: [ undefined ] } * ``` * * @since 2.0.0 * @category utils */ export const broadcast: { /** * Fan out the stream, producing a tuple of streams that have the same elements * as this stream. The driver stream will only ever advance the `maximumLag` * chunks before the slowest downstream stream. * * @example * ```ts * import { Console, Effect, Fiber, Schedule, Stream } from "effect" * * const numbers = Effect.scoped( * Stream.range(1, 20).pipe( * Stream.tap((n) => Console.log(`Emit ${n} element before broadcasting`)), * Stream.broadcast(2, 5), * Stream.flatMap(([first, second]) => * Effect.gen(function*() { * const fiber1 = yield* Stream.runFold(first, 0, (acc, e) => Math.max(acc, e)).pipe( * Effect.andThen((max) => Console.log(`Maximum: ${max}`)), * Effect.fork * ) * const fiber2 = yield* second.pipe( * Stream.schedule(Schedule.spaced("1 second")), * Stream.runForEach((n) => Console.log(`Logging to the Console: ${n}`)), * Effect.fork * ) * yield* Fiber.join(fiber1).pipe( * Effect.zip(Fiber.join(fiber2), { concurrent: true }) * ) * }) * ), * Stream.runCollect * ) * ) * * Effect.runPromise(numbers).then(console.log) * // Emit 1 element before broadcasting * // Emit 2 element before broadcasting * // Emit 3 element before broadcasting * // Emit 4 element before broadcasting * // Emit 5 element before broadcasting * // Emit 6 element before broadcasting * // Emit 7 element before broadcasting * // Emit 8 element before broadcasting * // Emit 9 element before broadcasting * // Emit 10 element before broadcasting * // Emit 11 element before broadcasting * // Logging to the Console: 1 * // Logging to the Console: 2 * // Logging to the Console: 3 * // 
Logging to the Console: 4 * // Logging to the Console: 5 * // Emit 12 element before broadcasting * // Emit 13 element before broadcasting * // Emit 14 element before broadcasting * // Emit 15 element before broadcasting * // Emit 16 element before broadcasting * // Logging to the Console: 6 * // Logging to the Console: 7 * // Logging to the Console: 8 * // Logging to the Console: 9 * // Logging to the Console: 10 * // Emit 17 element before broadcasting * // Emit 18 element before broadcasting * // Emit 19 element before broadcasting * // Emit 20 element before broadcasting * // Logging to the Console: 11 * // Logging to the Console: 12 * // Logging to the Console: 13 * // Logging to the Console: 14 * // Logging to the Console: 15 * // Maximum: 20 * // Logging to the Console: 16 * // Logging to the Console: 17 * // Logging to the Console: 18 * // Logging to the Console: 19 * // Logging to the Console: 20 * // { _id: 'Chunk', values: [ undefined ] } * ``` * * @since 2.0.0 * @category utils */ <N extends number>( n: N, maximumLag: number | { readonly capacity: "unbounded"; readonly replay?: number | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined } ): <A, E, R>(self: Stream<A, E, R>) => Effect.Effect<TupleOf<N, Stream<A, E>>, never, Scope.Scope | R> /** * Fan out the stream, producing a tuple of streams that have the same elements * as this stream. The driver stream will only ever advance the `maximumLag` * chunks before the slowest downstream stream. 
* * @example * ```ts * import { Console, Effect, Fiber, Schedule, Stream } from "effect" * * const numbers = Effect.scoped( * Stream.range(1, 20).pipe( * Stream.tap((n) => Console.log(`Emit ${n} element before broadcasting`)), * Stream.broadcast(2, 5), * Stream.flatMap(([first, second]) => * Effect.gen(function*() { * const fiber1 = yield* Stream.runFold(first, 0, (acc, e) => Math.max(acc, e)).pipe( * Effect.andThen((max) => Console.log(`Maximum: ${max}`)), * Effect.fork * ) * const fiber2 = yield* second.pipe( * Stream.schedule(Schedule.spaced("1 second")), * Stream.runForEach((n) => Console.log(`Logging to the Console: ${n}`)), * Effect.fork * ) * yield* Fiber.join(fiber1).pipe( * Effect.zip(Fiber.join(fiber2), { concurrent: true }) * ) * }) * ), * Stream.runCollect * ) * ) * * Effect.runPromise(numbers).then(console.log) * // Emit 1 element before broadcasting * // Emit 2 element before broadcasting * // Emit 3 element before broadcasting * // Emit 4 element before broadcasting * // Emit 5 element before broadcasting * // Emit 6 element before broadcasting * // Emit 7 element before broadcasting * // Emit 8 element before broadcasting * // Emit 9 element before broadcasting * // Emit 10 element before broadcasting * // Emit 11 element before broadcasting * // Logging to the Console: 1 * // Logging to the Console: 2 * // Logging to the Console: 3 * // Logging to the Console: 4 * // Logging to the Console: 5 * // Emit 12 element before broadcasting * // Emit 13 element before broadcasting * // Emit 14 element before broadcasting * // Emit 15 element before broadcasting * // Emit 16 element before broadcasting * // Logging to the Console: 6 * // Logging to the Console: 7 * // Logging to the Console: 8 * // Logging to the Console: 9 * // Logging to the Console: 10 * // Emit 17 element before broadcasting * // Emit 18 element before broadcasting * // Emit 19 element before broadcasting * // Emit 20 element before broadcasting * // Logging to the Console: 11 * // 
Logging to the Console: 12 * // Logging to the Console: 13 * // Logging to the Console: 14 * // Logging to the Console: 15 * // Maximum: 20 * // Logging to the Console: 16 * // Logging to the Console: 17 * // Logging to the Console: 18 * // Logging to the Console: 19 * // Logging to the Console: 20 * // { _id: 'Chunk', values: [ undefined ] } * ``` * * @since 2.0.0 * @category utils */ <A, E, R, N extends number>( self: Stream<A, E, R>, n: N, maximumLag: number | { readonly capacity: "unbounded"; readonly replay?: number | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined } ): Effect.Effect<TupleOf<N, Stream<A, E>>, never, Scope.Scope | R> } = internal.broadcast /** * Returns a new Stream that multicasts the original Stream, subscribing to it as soon as the first consumer subscribes. * As long as there is at least one consumer, the upstream will continue running and emitting data. * When all consumers have exited, the upstream will be finalized. * * @since 3.8.0 * @category utils */ export const share: { /** * Returns a new Stream that multicasts the original Stream, subscribing to it as soon as the first consumer subscribes. * As long as there is at least one consumer, the upstream will continue running and emitting data. * When all consumers have exited, the upstream will be finalized. * * @since 3.8.0 * @category utils */ <A, E>( config: { readonly capacity: "unbounded" readonly replay?: number | undefined readonly idleTimeToLive?: Duration.DurationInput | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined readonly idleTimeToLive?: Duration.DurationInput | undefined } ): <R>(self: Stream<A, E, R>) => Effect.Effect<Stream<A, E>, never, R | Scope.Scope> /** * Returns a new Stream that multicasts the original Stream, subscribing to it as soon as the first consumer subscribes. 
* As long as there is at least one consumer, the upstream will continue running and emitting data. * When all consumers have exited, the upstream will be finalized. * * @since 3.8.0 * @category utils */ <A, E, R>( self: Stream<A, E, R>, config: { readonly capacity: "unbounded" readonly replay?: number | undefined readonly idleTimeToLive?: Duration.DurationInput | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined readonly idleTimeToLive?: Duration.DurationInput | undefined } ): Effect.Effect<Stream<A, E>, never, R | Scope.Scope> } = internal.share /** * Fan out the stream, producing a dynamic number of streams that have the * same elements as this stream. The driver stream will only ever advance the * `maximumLag` chunks before the slowest downstream stream. * * @since 2.0.0 * @category utils */ export const broadcastDynamic: { /** * Fan out the stream, producing a dynamic number of streams that have the * same elements as this stream. The driver stream will only ever advance the * `maximumLag` chunks before the slowest downstream stream. * * @since 2.0.0 * @category utils */ ( maximumLag: number | { readonly capacity: "unbounded"; readonly replay?: number | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined } ): <A, E, R>(self: Stream<A, E, R>) => Effect.Effect<Stream<A, E>, never, Scope.Scope | R> /** * Fan out the stream, producing a dynamic number of streams that have the * same elements as this stream. The driver stream will only ever advance the * `maximumLag` chunks before the slowest downstream stream. 
* * @since 2.0.0 * @category utils */ <A, E, R>( self: Stream<A, E, R>, maximumLag: number | { readonly capacity: "unbounded"; readonly replay?: number | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined } ): Effect.Effect<Stream<A, E>, never, Scope.Scope | R> } = internal.broadcastDynamic /** * Converts the stream to a scoped list of queues. Every value will be * replicated to every queue with the slowest queue being allowed to buffer * `maximumLag` chunks before the driver is back pressured. * * Queues can unsubscribe from upstream by shutting down. * * @since 2.0.0 * @category utils */ export const broadcastedQueues: { /** * Converts the stream to a scoped list of queues. Every value will be * replicated to every queue with the slowest queue being allowed to buffer * `maximumLag` chunks before the driver is back pressured. * * Queues can unsubscribe from upstream by shutting down. * * @since 2.0.0 * @category utils */ <N extends number>( n: N, maximumLag: number | { readonly capacity: "unbounded"; readonly replay?: number | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined } ): <A, E, R>( self: Stream<A, E, R> ) => Effect.Effect<TupleOf<N, Queue.Dequeue<Take.Take<A, E>>>, never, Scope.Scope | R> /** * Converts the stream to a scoped list of queues. Every value will be * replicated to every queue with the slowest queue being allowed to buffer * `maximumLag` chunks before the driver is back pressured. * * Queues can unsubscribe from upstream by shutting down. 
* * @since 2.0.0 * @category utils */ <A, E, R, N extends number>( self: Stream<A, E, R>, n: N, maximumLag: number | { readonly capacity: "unbounded"; readonly replay?: number | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined } ): Effect.Effect<TupleOf<N, Queue.Dequeue<Take.Take<A, E>>>, never, Scope.Scope | R> } = internal.broadcastedQueues /** * Converts the stream to a scoped dynamic amount of queues. Every chunk will * be replicated to every queue with the slowest queue being allowed to buffer * `maximumLag` chunks before the driver is back pressured. * * Queues can unsubscribe from upstream by shutting down. * * @since 2.0.0 * @category utils */ export const broadcastedQueuesDynamic: { /** * Converts the stream to a scoped dynamic amount of queues. Every chunk will * be replicated to every queue with the slowest queue being allowed to buffer * `maximumLag` chunks before the driver is back pressured. * * Queues can unsubscribe from upstream by shutting down. * * @since 2.0.0 * @category utils */ ( maximumLag: number | { readonly capacity: "unbounded"; readonly replay?: number | undefined } | { readonly capacity: number readonly strategy?: "sliding" | "dropping" | "suspend" | undefined readonly replay?: number | undefined } ): <A, E, R>( self: Stream<A, E, R> ) => Effect.Effect<Effect.Effect<Queue.Dequeue<Take.Take<A, E>>, never, Scope.Scope>, never, Scope.Scope | R> /** * Converts the stream to a scoped dynamic amount of queues. Every chunk will * be replicated to every queue with the slowest queue being allowed to buffer * `maximumLag` chunks before the driver is back pressured. * * Queues can unsubscribe from upstream by shutting down. 
 *
 * @since 2.0.0
 * @category utils
 */
  <A, E, R>(
    self: Stream<A, E, R>,
    maximumLag:
      | number
      | { readonly capacity: "unbounded"; readonly replay?: number | undefined }
      | {
        readonly capacity: number
        readonly strategy?: "sliding" | "dropping" | "suspend" | undefined
        readonly replay?: number | undefined
      }
  ): Effect.Effect<Effect.Effect<Queue.Dequeue<Take.Take<A, E>>, never, Scope.Scope>, never, Scope.Scope | R>
} = internal.broadcastedQueuesDynamic

/**
 * Allows a faster producer to progress independently of a slower consumer by
 * buffering up to `capacity` elements in a queue.
 *
 * Note: This combinator destroys the chunking structure. It's recommended to
 * use rechunk afterwards. Additionally, prefer capacities that are powers
 * of 2 for better performance.
 *
 * @example
 * ```ts
 * import { Console, Effect, Schedule, Stream } from "effect"
 *
 * const stream = Stream.range(1, 10).pipe(
 *   Stream.tap((n) => Console.log(`before buffering: ${n}`)),
 *   Stream.buffer({ capacity: 4 }),
 *   Stream.tap((n) => Console.log(`after buffering: ${n}`)),
 *   Stream.schedule(Schedule.spaced("5 seconds"))
 * )
 *
 * Effect.runPromise(Stream.runCollect(stream)).then(console.log)
 * // before buffering: 1
 * // before buffering: 2
 * // before buffering: 3
 * // before buffering: 4
 * // before buffering: 5
 * // before buffering: 6
 * // after buffering: 1
 * // after buffering: 2
 * // before buffering: 7
 * // after buffering: 3
 * // before buffering: 8
 * // after buffering: 4
 * // before buffering: 9
 * // after buffering: 5
 * // before buffering: 10
 * // ...
 * ```
 *
 * @since 2.0.0
 * @category utils
 */
export const buffer: {
  /**
   * Allows a faster producer to progress independently of a slower consumer by
   * buffering up to `capacity` elements in a queue.
   *
   * Note: This combinator destroys the chunking structure. It's recommended to
   * use rechunk afterwards. Additionally, prefer capacities that are powers
   * of 2 for better performance.
   *
   * @example
   * ```ts
   * import { Console, Effect, Schedule, Stream } from "effect"
   *
   * const stream = Stream.range(1, 10).pipe(
   *   Stream.tap((n) => Console.log(`before buffering: ${n}`)),
   *   Stream.buffer({ capacity: 4 }),
   *   Stream.tap((n) => Console.log(`after buffering: ${n}`)),
   *   Stream.schedule(Schedule.spaced("5 seconds"))
   * )
   *
   * Effect.runPromise(Stream.runCollect(stream)).then(console.log)
   * // before buffering: 1
   * // before buffering: 2
   * // before buffering: 3
   * // before buffering: 4
   * // before buffering: 5
   * // before buffering: 6
   * // after buffering: 1
   * // after buffering: 2
   * // before buffering: 7
   * // after buffering: 3
   * // before buffering: 8
   * // after buffering: 4
   * // before buffering: 9
   * // after buffering: 5
   * // before buffering: 10
   * // ...
   * ```
   *
   * @since 2.0.0
   * @category utils
   */
  (
    options:
      | { readonly capacity: "unbounded" }
      | {
        readonly capacity: number
        readonly strategy?: "dropping" | "sliding" | "suspend" | undefined
      }
  ): <A, E, R>(self: Stream<A, E, R>) => Stream<A, E, R>
  /**
   * Allows a faster producer to progress independently of a slower consumer by
   * buffering up to `capacity` elements in a queue.
   *
   * Note: This combinator destroys the chunking structure. It's recommended to
   * use rechunk afterwards. Additionally, prefer capacities that are powers
   * of 2 for better performance.
   *
   * @example
   * ```ts
   * import { Console, Effect, Schedule, Stream } from "effect"
   *
   * const stream = Stream.range(1, 10).pipe(
   *   Stream.tap((n) => Console.log(`before buffering: ${n}`)),
   *   Stream.buffer({ capacity: 4 }),
   *   Stream.tap((n) => Console.log(`after buffering: ${n}`)),
   *   Stream.schedule(Schedule.spaced("5 seconds"))
   * )
   *
   * Effect.runPromise(Stream.runCollect(stream)).then(console.log)
   * // before buffering: 1
   * // before buffering: 2
   * // before buffering: 3
   * // before buffering: 4
   * // before buffering: 5
   * // before buffering: 6
   * // after buffering: 1
   * // after buffering: 2
   * // before buffering: 7
   * // after buffering: 3
   * // before buffering: 8
   * // after buffering: 4
   * // before buffering: 9
   * // after buffering: 5
   * // before buffering: 10
   * // ...
   * ```
   *
   * @since 2.0.0
   * @category utils
   */
  <A, E, R>(
    self: Stream<A, E, R>,
    options:
      | { readonly capacity: "unbounded" }
      | {
        readonly capacity: number
        readonly strategy?: "dropping" | "sliding" | "suspend" | undefined
      }
  ): Stream<A, E, R>
} = internal.buffer

/**
 * Allows a faster producer to progress independently of a slower consumer by
 * buffering up to `capacity` chunks in a queue.
 *
 * @note Prefer capacities that are powers of 2 for better performance.
 * @since 2.0.0
 * @category utils
 */
export const bufferChunks: {
  /**
   * Allows a faster producer to progress independently of a slower consumer by
   * buffering up to `capacity` chunks in a queue.
   *
   * @note Prefer capacities that are powers of 2 for better performance.
   * @since 2.0.0
   * @category utils
   */
  (
    options: { readonly capacity: number; readonly strategy?: "dropping" | "sliding" | "suspend" | undefined }
  ): <A, E, R>(self: Stream<A, E, R>) => Stream<A, E, R>
  /**
   * Allows a faster producer to progress independently of a slower consumer by
   * buffering up to `capacity` chunks in a queue.
   *
   * @note Prefer capacities that are powers of 2 for better performance.
 * @since 2.0.0
 * @category utils
 */
  <A, E, R>(
    self: Stream<A, E, R>,
    options: { readonly capacity: number; readonly strategy?: "dropping" | "sliding" | "suspend" | undefined }
  ): Stream<A, E, R>
} = internal.bufferChunks

/**
 * Switches over to the stream produced by the provided function in case this
 * one fails with a typed error.
 *
 * @since 2.0.0
 * @category error handling
 */
export const catchAll: {
  /**
   * Switches over to the stream produced by the provided function in case this
   * one fails with a typed error.
   *
   * @since 2.0.0
   * @category error handling
   */
  <E, A2, E2, R2>(
    f: (error: E) => Stream<A2, E2, R2>
  ): <A, R>(self: Stream<A, E, R>) => Stream<A2 | A, E2, R2 | R>
  /**
   * Switches over to the stream produced by the provided function in case this
   * one fails with a typed error.
   *
   * @since 2.0.0
   * @category error handling
   */
  <A, E, R, A2, E2, R2>(
    self: Stream<A, E, R>,
    f: (error: E) => Stream<A2, E2, R2>
  ): Stream<A | A2, E2, R | R2>
} = internal.catchAll

/**
 * Switches over to the stream produced by the provided function in case this
 * one fails. Allows recovery from all causes of failure, including
 * interruption if the stream is uninterruptible.
 *
 * @since 2.0.0
 * @category error handling
 */
export const catchAllCause: {
  /**
   * Switches over to the stream produced by the provided function in case this
   * one fails. Allows recovery from all causes of failure, including
   * interruption if the stream is uninterruptible.
   *
   * @since 2.0.0
   * @category error handling
   */
  <E, A2, E2, R2>(
    f: (cause: Cause.Cause<E>) => Stream<A2, E2, R2>
  ): <A, R>(self: Stream<A, E, R>) => Stream<A2 | A, E2, R2 | R>
  /**
   * Switches over to the stream produced by the provided function in case this
   * one fails. Allows recovery from all causes of failure, including
   * interruption if the stream is uninterruptible.
   *
   * @since 2.0.0
   * @category error handling
   */
  <A, E, R, A2, E2, R2>(
    self: Stream<A, E, R>,
    f: (cause: Cause.Cause<E>) => Stream<A2, E2, R2>
  ): Stream<A | A2, E2, R | R2>
} = internal.catchAllCause

/**
 * Switches over to the stream produced by the provided function in case this
 * one fails with some typed error.
 *
 * @since 2.0.0
 * @category error handling
 */
export const catchSome: {
  /**
   * Switches over to the stream produced by the provided function in case this
   * one fails with some typed error.
   *
   * @since 2.0.0
   * @category error handling
   */
  <E, A2, E2, R2>(
    pf: (error: E) => Option.Option<Stream<A2, E2, R2>>
  ): <A, R>(self: Stream<A, E, R>) => Stream<A2 | A, E | E2, R2 | R>
  /**
   * Switches over to the stream produced by the provided function in case this
   * one fails with some typed error.
   *
   * @since 2.0.0
   * @category error handling
   */
  <A, E, R, A2, E2, R2>(
    self: Stream<A, E, R>,
    pf: (error: E) => Option.Option<Stream<A2, E2, R2>>
  ): Stream<A | A2, E | E2, R | R2>
} = internal.catchSome

/**
 * Switches over to the stream produced by the provided function in case this
 * one fails with an error matching the given `_tag`.
 *
 * @since 2.0.0
 * @category error handling
 */
export const catchTag: {
  /**
   * Switches over to the stream produced by the provided function in case this
   * one fails with an error matching the given `_tag`.
   *
   * @since 2.0.0
   * @category error handling
   */
  <K extends E["_tag"] & string, E extends { _tag: string }, A1, E1, R1>(
    k: K,
    f: (e: Extract<E, { _tag: K }>) => Stream<A1, E1, R1>
  ): <A, R>(self: Stream<A, E, R>) => Stream<A1 | A, E1 | Exclude<E, { _tag: K }>, R1 | R>
  /**
   * Switches over to the stream produced by the provided function in case this
   * one fails with an error matching the given `_tag`.
* * @since 2.0.0 * @category error handling */ <A, E extends { _tag: string }, R, K extends E["_tag"] & string, A1, E1, R1>( self: Stream<A, E, R>, k: K, f: (e: Extract<E, { _tag: K }>) => Stream<A1, E1, R1> ): Stream<A | A1, E1 | Exclude<E, { _tag: K }>, R | R1> } = internal.catchTag /** * Switches over to the stream produced by one of the provided functions, in * case this one fails with an error matching one of the given `_tag`'s. * * @since 2.0.0 * @category error handling */ export const catchTags: { /** * Switches over to the stream produced by one of the provided functions, in * case this one fails with an error matching one of the given `_tag`'s. * * @since 2.0.0 * @category error handling */ < E extends { _tag: string }, Cases extends { [K in E["_tag"]]+?: (error: Extract<E, { _tag: K }>) => Stream<any, any, any> } >(cases: Cases): <A, R>( self: Stream<A, E, R> ) => Stream< | A | { [K in keyof Cases]: Cases[K] extends (...args: Array<any>) => Stream.Variance<infer A, infer _E, infer _R> ? A : never }[keyof Cases], | Exclude<E, { _tag: keyof Cases }> | { [K in keyof Cases]: Cases[K] extends (...args: Array<any>) => Stream.Variance<infer _A, infer E, infer _R> ? E : never }[keyof Cases], | R | { [K in keyof Cases]: Cases[K] extends (...args: Array<any>) => Stream.Variance<infer _A, infer _E, infer R> ? R : never }[keyof Cases] > /** * Switches over to the stream produced by one of the provided functions, in * case this one fails with an error matching one of the given `_tag`'s. * * @since 2.0.0 * @category error handling */ < A, E extends { _tag: string }, R, Cases extends { [K in E["_tag"]]+?: (error: Extract<E, { _tag: K }>) => Stream<any, any, any> } >(self: Stream<A, E, R>, cases: Cases): Stream< | A | { [K in keyof Cases]: Cases[K] extends (...args: Array<any>) => Stream.Variance<infer _R, infer _E, infer A> ? 
A : never }[keyof Cases], | Exclude<E, { _tag: keyof Cases }> | { [K in keyof Cases]: Cases[K] extends (...args: Array<any>) => Stream.Variance<infer _R, infer E, infer _A> ? E : never }[keyof Cases], | R | { [K in keyof Cases]: Cases[K] extends (...args: Array<any>) => Stream.Variance<infer R, infer _E, infer _A> ? R : never }[keyof Cases] > } = internal.catchTags /** * Switches over to the stream produced by the provided function in case this * one fails with some errors. Allows recovery from all causes of failure, * including interruption if the stream is uninterruptible. * * @since 2.0.0 * @category error handling */ export const catchSomeCause: { /** * Switches over to the stream produced by the provided function in case this * one fails with some errors. Allows recovery from all causes of failure, * including interruption if the stream is uninterruptible. * * @since 2.0.0 * @category error handling */ <E, A2, E2, R2>(pf: (cause: Cause.Cause<E>) => Option.Option<Stream<A2, E2, R2>>): <A, R>(self: Stream<A, E, R>) => Stream<A2 | A, E | E2, R2 | R> /** * Switches over to the stream produced by the provided function in case this * one fails with some errors. Allows recovery from all causes of failure, * including interruption if the stream is uninterruptible. * * @since 2.0.0 * @category error handling */ <A, E, R, A2, E2, R2>( self: Stream<A, E, R>, pf: (cause: Cause.Cause<E>) => Option.Option<Stream<A2, E2, R2>> ): Stream<A | A2, E | E2, R | R2> } = internal.catchSomeCause /** * Returns a new stream that only emits elements that are not equal to the * previous element emitted, using natural equality to determine whether two * elements are equal. 
 *
 * @example
 * ```ts
 * import { Effect, Stream } from "effect"
 *
 * const stream = Stream.make(1, 1, 1, 2, 2, 3, 4).pipe(Stream.changes)
 *
 * Effect.runPromise(Stream.runCollect(stream)).then(console.log)
 * // { _id: 'Chunk', values: [ 1, 2, 3, 4 ] }
 * ```
 *
 * @since 2.0.0
 * @category utils
 */
export const changes: <A, E, R>(self: Stream<A, E, R>) => Stream<A, E, R> = internal.changes

/**
 * Returns a new stream that only emits elements that are not equal to the
 * previous element emitted, using the specified function to determine whether
 * two elements are equal.
 *
 * @since 2.0.0
 * @category utils
 */
export const changesWith: {
  /**
   * Returns a new stream that only emits elements that are not equal to the
   * previous element emitted, using the specified function to determine whether
   * two elements are equal.
   *
   * @since 2.0.0
   * @category utils
   */
  <A>(f: (x: A, y: A) => boolean): <E,