// Tone.js — A Web Audio framework for making interactive music in the browser.
// TypeScript ambient type declarations.
declare module Tone {
/**
* @class Tone.AmplitudeEnvelope is a Tone.Envelope connected to a gain node.
* Unlike Tone.Envelope, which outputs the envelope's value, Tone.AmplitudeEnvelope accepts
* an audio signal as the input and will apply the envelope to the amplitude
* of the signal. Read more about ADSR Envelopes on [Wikipedia](https://en.wikipedia.org/wiki/Synthesizer#ADSR_envelope).
*
* @constructor
* @extends {Tone.Envelope}
* @param {Time|Object} [attack] The amount of time it takes for the envelope to go from
* 0 to its maximum value.
* @param {Time} [decay] The period of time after the attack that it takes for the envelope
* to fall to the sustain value.
* @param {NormalRange} [sustain] The percent of the maximum value that the envelope rests at until
* the release is triggered.
* @param {Time} [release] The amount of time after the release is triggered it takes to reach 0.
* @example
* var ampEnv = new Tone.AmplitudeEnvelope({
* "attack": 0.1,
* "decay": 0.2,
* "sustain": 1.0,
* "release": 0.8
* }).toMaster();
* //create an oscillator and connect it
* var osc = new Tone.Oscillator().connect(ampEnv).start();
* //trigger the envelopes attack and release "8t" apart
* ampEnv.triggerAttackRelease("8t");
*/
class AmplitudeEnvelope extends Tone.Envelope {
constructor(attack?: Time | any, decay?: Time, sustain?: NormalRange, release?: Time);
/**
* Clean up
* @return {Tone.AmplitudeEnvelope} this
*/
dispose(): Tone.AmplitudeEnvelope;
/**
* When triggerAttack is called, the attack time is the amount of
* time it takes for the envelope to reach its maximum value.
* @type {Time}
*/
attack: Time;
/**
* After the attack portion of the envelope, the value will fall
* over the duration of the decay time to its sustain value.
* @type {Time}
*/
decay: Time;
/**
* The sustain value is the value
* which the envelope rests at after triggerAttack is
* called, but before triggerRelease is invoked.
* @type {NormalRange}
*/
sustain: NormalRange;
/**
* After triggerRelease is called, the envelope's
* value will fall to its minimum value over the
* duration of the release time.
* @type {Time}
*/
release: Time;
/**
* Read the current value of the envelope. Useful for
* synchronizing visual output to the envelope.
* @memberOf Tone.Envelope#
* @type {Number}
* @name value
* @readOnly
*/
readonly value: number;
/**
* The shape of the attack.
* Can be any of these strings:
* <ul>
* <li>linear</li>
* <li>exponential</li>
* <li>sine</li>
* <li>cosine</li>
* <li>bounce</li>
* <li>ripple</li>
* <li>step</li>
* </ul>
* Can also be an array which describes the curve. Values
* in the array are evenly subdivided and linearly
* interpolated over the duration of the attack.
* @memberOf Tone.Envelope#
* @type {String|Array}
* @name attackCurve
* @example
* env.attackCurve = "linear";
* @example
* //can also be an array
* env.attackCurve = [0, 0.2, 0.3, 0.4, 1]
*/
// was `string | Array`, which is invalid TS (Array<T> needs a type argument);
// the examples show numeric arrays, so number[] is the correct element type
attackCurve: string | number[];
/**
* The shape of the release. See the attack curve types.
* @memberOf Tone.Envelope#
* @type {String|Array}
* @name releaseCurve
* @example
* env.releaseCurve = "linear";
*/
releaseCurve: string | number[];
/**
* The shape of the decay either "linear" or "exponential"
* @memberOf Tone.Envelope#
* @type {String}
* @name decayCurve
* @example
* env.decayCurve = "linear";
*/
decayCurve: string;
/**
* Trigger the attack/decay portion of the ADSR envelope.
* @param {Time} [time=now] When the attack should start.
* @param {NormalRange} [velocity=1] The velocity of the envelope scales the value.
* number between 0-1
* @returns {Tone.Envelope} this
* @example
* //trigger the attack 0.5 seconds from now with a velocity of 0.2
* env.triggerAttack("+0.5", 0.2);
*/
triggerAttack(time?: Time, velocity?: NormalRange): Tone.Envelope;
/**
* Triggers the release of the envelope.
* @param {Time} [time=now] When the release portion of the envelope should start.
* @returns {Tone.Envelope} this
* @example
* //trigger release immediately
* env.triggerRelease();
*/
triggerRelease(time?: Time): Tone.Envelope;
/**
* Get the scheduled value at the given time. This will
* return the unconverted (raw) value.
* @param {Number} time The time in seconds.
* @return {Number} The scheduled value at the given time.
*/
getValueAtTime(time: number): number;
/**
* triggerAttackRelease is shorthand for triggerAttack, then waiting
* some duration, then triggerRelease.
* @param {Time} duration The duration of the sustain.
* @param {Time} [time=now] When the attack should be triggered.
* @param {number} [velocity=1] The velocity of the envelope.
* @returns {Tone.Envelope} this
* @example
* //trigger the attack and then the release after 0.6 seconds.
* env.triggerAttackRelease(0.6);
*/
triggerAttackRelease(duration: Time, time?: Time, velocity?: number): Tone.Envelope;
/**
* Cancels all scheduled envelope changes after the given time.
* @param {Time} after
* @returns {Tone.Envelope} this
*/
cancel(after: Time): Tone.Envelope;
/**
* Get the audio context belonging to this instance.
* @type {Tone.Context}
* @memberOf Tone.AudioNode#
* @name context
* @readOnly
*/
readonly context: Tone.Context;
/**
* channelCount is the number of channels used when up-mixing and down-mixing
* connections to any inputs to the node. The default value is 2 except for
* specific nodes where its value is specially determined.
*
* @memberof Tone.AudioNode#
* @type {Number}
* @name channelCount
* @readOnly
*/
readonly channelCount: number;
/**
* channelCountMode determines how channels will be counted when up-mixing and
* down-mixing connections to any inputs to the node.
* The default value is "max". This attribute has no effect for nodes with no inputs.
* @memberof Tone.AudioNode#
* @type {String}
* @name channelCountMode
* @readOnly
*/
readonly channelCountMode: string;
/**
* channelInterpretation determines how individual channels will be treated
* when up-mixing and down-mixing connections to any inputs to the node.
* The default value is "speakers".
* @memberof Tone.AudioNode#
* @type {String}
* @name channelInterpretation
* @readOnly
*/
readonly channelInterpretation: string;
/**
* The number of inputs feeding into the AudioNode.
* For source nodes, this will be 0.
* @type {Number}
* @name numberOfInputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfInputs: number;
/**
* The number of outputs coming out of the AudioNode.
* @type {Number}
* @name numberOfOutputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfOutputs: number;
/**
* connect the output of a ToneNode to an AudioParam, AudioNode, or ToneNode
* (added for consistency — every other Tone.AudioNode subclass in this file
* declares connect, and this class's own example uses osc.connect(ampEnv))
* @param {Tone | AudioParam | AudioNode} unit
* @param {number} [outputNum=0] optionally which output to connect from
* @param {number} [inputNum=0] optionally which input to connect to
* @returns {Tone.AudioNode} this
*/
connect(unit: Tone | AudioParam | AudioNode, outputNum?: number, inputNum?: number): Tone.AudioNode;
/**
* disconnect the output
* @param {Number|AudioNode} output Either the output index to disconnect
* if the output is an array, or the
* node to disconnect from.
* @returns {Tone.AudioNode} this
*/
disconnect(output: number | AudioNode): Tone.AudioNode;
/**
* Connect the output of this node to the rest of the nodes in series.
* @example
* //connect a node to an effect, panVol and then to the master output
* node.chain(effect, panVol, Tone.Master);
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
chain(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* connect the output of this node to the rest of the nodes in parallel.
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
fan(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* Connect 'this' to the master output. Shorthand for this.connect(Tone.Master)
* @returns {Tone.AudioNode} this
* @example
* //connect an oscillator to the master output
* var osc = new Tone.Oscillator().toMaster();
*/
toMaster(): Tone.AudioNode;
/**
* Send this signal to the channel name.
* @param {String} channelName A named channel to send the signal to.
* @param {Decibels} amount The amount of the source to send to the bus.
* @return {GainNode} The gain node which connects this node to the desired channel.
* Can be used to adjust the levels of the send.
* @example
* source.send("reverb", -12);
*/
send(channelName: string, amount: Decibels): GainNode;
/**
* Receive the input from the desired channelName to the input
*
* @param {String} channelName The named channel to receive the signal from.
* @param {Number=} channelNumber The channel to connect to
* @returns {Tone} this
* @example
* reverbEffect.receive("reverb");
*/
receive(channelName: string, channelNumber?: number): Tone;
/**
* Convert Time into seconds.
*
* Unlike the method which it overrides, this takes into account
* transport time and musical notation.
*
* Time : 1.40
* Notation: 4n or 1m or 2t
* Now Relative: +3n
*
* @param {Time} time
* @return {Seconds}
*/
toSeconds(time: Time): Seconds;
/**
* Convert a frequency representation into a number.
* @param {Frequency} freq
* @return {Hertz} the frequency in hertz
*/
toFrequency(freq: Frequency): Hertz;
/**
* Convert a time representation into ticks.
* @param {Time} time
* @return {Ticks} the time in ticks
*/
toTicks(time: Time): Ticks;
}
/**
* @class Wrapper around the native Web Audio's
* [AnalyserNode](http://webaudio.github.io/web-audio-api/#idl-def-AnalyserNode).
* Extracts FFT or Waveform data from the incoming signal.
* @extends {Tone.AudioNode}
* @param {String=} type The return type of the analysis, either "fft", or "waveform".
* @param {Number=} size The size of the FFT. Value must be a power of
* two in the range 16 to 16384.
*/
class Analyser extends Tone.AudioNode {
constructor(type?: string, size?: number);
/**
* The default values.
* @type {Object}
* @const
*/
static readonly defaults: any;
/**
* Run the analysis given the current settings and return the
* result as a TypedArray of length [size](#size).
* NOTE(review): TypedArray is not declared in this chunk — it is assumed to be
* declared elsewhere in the full typings file.
* @returns {TypedArray}
*/
getValue(): TypedArray;
/**
* The size of analysis. This must be a power of two in the range 16 to 16384.
* @memberOf Tone.Analyser#
* @type {Number}
* @name size
*/
size: number;
/**
* The analysis function returned by analyser.getValue(), either "fft" or "waveform".
* @memberOf Tone.Analyser#
* @type {String}
* @name type
*/
type: string;
/**
* The amount of time averaging with the last analysis frame.
* 0 represents no time averaging with the last analysis frame.
* @memberOf Tone.Analyser#
* @type {NormalRange}
* @name smoothing
*/
smoothing: NormalRange;
/**
* Clean up.
* @return {Tone.Analyser} this
*/
dispose(): Tone.Analyser;
/**
* Get the audio context belonging to this instance.
* @type {Tone.Context}
* @memberOf Tone.AudioNode#
* @name context
* @readOnly
*/
readonly context: Tone.Context;
/**
* channelCount is the number of channels used when up-mixing and down-mixing
* connections to any inputs to the node. The default value is 2 except for
* specific nodes where its value is specially determined.
*
* @memberof Tone.AudioNode#
* @type {Number}
* @name channelCount
* @readOnly
*/
readonly channelCount: number;
/**
* channelCountMode determines how channels will be counted when up-mixing and
* down-mixing connections to any inputs to the node.
* The default value is "max". This attribute has no effect for nodes with no inputs.
* @memberof Tone.AudioNode#
* @type {String}
* @name channelCountMode
* @readOnly
*/
readonly channelCountMode: string;
/**
* channelInterpretation determines how individual channels will be treated
* when up-mixing and down-mixing connections to any inputs to the node.
* The default value is "speakers".
* @memberof Tone.AudioNode#
* @type {String}
* @name channelInterpretation
* @readOnly
*/
readonly channelInterpretation: string;
/**
* The number of inputs feeding into the AudioNode.
* For source nodes, this will be 0.
* @type {Number}
* @name numberOfInputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfInputs: number;
/**
* The number of outputs coming out of the AudioNode.
* @type {Number}
* @name numberOfOutputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfOutputs: number;
/**
* connect the output of a ToneNode to an AudioParam, AudioNode, or ToneNode
* @param {Tone | AudioParam | AudioNode} unit
* @param {number} [outputNum=0] optionally which output to connect from
* @param {number} [inputNum=0] optionally which input to connect to
* @returns {Tone.AudioNode} this
*/
connect(unit: Tone | AudioParam | AudioNode, outputNum?: number, inputNum?: number): Tone.AudioNode;
/**
* disconnect the output
* @param {Number|AudioNode} output Either the output index to disconnect
* if the output is an array, or the
* node to disconnect from.
* @returns {Tone.AudioNode} this
*/
disconnect(output: number | AudioNode): Tone.AudioNode;
/**
* Connect the output of this node to the rest of the nodes in series.
* @example
* //connect a node to an effect, panVol and then to the master output
* node.chain(effect, panVol, Tone.Master);
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
chain(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* connect the output of this node to the rest of the nodes in parallel.
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
fan(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* Connect 'this' to the master output. Shorthand for this.connect(Tone.Master)
* @returns {Tone.AudioNode} this
* @example
* //connect an oscillator to the master output
* var osc = new Tone.Oscillator().toMaster();
*/
toMaster(): Tone.AudioNode;
/**
* Send this signal to the channel name.
* @param {String} channelName A named channel to send the signal to.
* @param {Decibels} amount The amount of the source to send to the bus.
* @return {GainNode} The gain node which connects this node to the desired channel.
* Can be used to adjust the levels of the send.
* @example
* source.send("reverb", -12);
*/
send(channelName: string, amount: Decibels): GainNode;
/**
* Receive the input from the desired channelName to the input
*
* @param {String} channelName The named channel to receive the signal from.
* @param {Number=} channelNumber The channel to connect to
* @returns {Tone} this
* @example
* reverbEffect.receive("reverb");
*/
receive(channelName: string, channelNumber?: number): Tone;
/**
* Convert Time into seconds.
*
* Unlike the method which it overrides, this takes into account
* transport time and musical notation.
*
* Time : 1.40
* Notation: 4n or 1m or 2t
* Now Relative: +3n
*
* @param {Time} time
* @return {Seconds}
*/
toSeconds(time: Time): Seconds;
/**
* Convert a frequency representation into a number.
* @param {Frequency} freq
* @return {Hertz} the frequency in hertz
*/
toFrequency(freq: Frequency): Hertz;
/**
* Convert a time representation into ticks.
* @param {Time} time
* @return {Ticks} the time in ticks
*/
toTicks(time: Time): Ticks;
}
module Analyser {
/**
* Possible return types of analyser.getValue()
* @enum {String}
*/
enum Type {
// time-domain analysis ("waveform")
Waveform,
// frequency-domain analysis ("fft")
FFT
}
}
/**
* @class Tone.Channel provides a channel strip interface with
* volume, pan, solo and mute controls.
*
* @extends {Tone.AudioNode}
* @constructor
* @param {Decibels} volume The output volume.
* @param {AudioRange} pan the initial pan
* @example
* //drop the volume and pan the incoming signal left
* //(the constructor takes volume first, then pan)
* var channel = new Tone.Channel(-12, -0.25);
*/
class Channel extends Tone.AudioNode {
/**
* @param volume The output volume in decibels.
* @param pan The initial pan position (AudioRange, -1 to 1).
*/
constructor(volume: Decibels, pan: AudioRange);
/**
* The L/R panning control.
* @type {AudioRange}
* @signal
*/
pan: AudioRange;
/**
* The volume control in decibels.
* @type {Decibels}
* @signal
*/
volume: Decibels;
/**
* The defaults
* @type {Object}
* @const
* @static
*/
static readonly defaults: any;
/**
* Solo/unsolo the channel. Soloing is only relative to other
* Tone.Channels and Tone.Solos.
* @memberOf Tone.Channel#
* @name solo
* @type {Boolean}
*/
solo: boolean;
/**
* If the current instance is muted, i.e. another instance is soloed,
* or the channel is muted
* @memberOf Tone.Channel#
* @type {Boolean}
* @name muted
* @readOnly
*/
readonly muted: boolean;
/**
* Mute/unmute the volume
* @memberOf Tone.Channel#
* @name mute
* @type {Boolean}
*/
mute: boolean;
/**
* clean up
* @returns {Tone.Channel} this
*/
dispose(): Tone.Channel;
/**
* Get the audio context belonging to this instance.
* @type {Tone.Context}
* @memberOf Tone.AudioNode#
* @name context
* @readOnly
*/
readonly context: Tone.Context;
/**
* channelCount is the number of channels used when up-mixing and down-mixing
* connections to any inputs to the node. The default value is 2 except for
* specific nodes where its value is specially determined.
*
* @memberof Tone.AudioNode#
* @type {Number}
* @name channelCount
* @readOnly
*/
readonly channelCount: number;
/**
* channelCountMode determines how channels will be counted when up-mixing and
* down-mixing connections to any inputs to the node.
* The default value is "max". This attribute has no effect for nodes with no inputs.
* @memberof Tone.AudioNode#
* @type {String}
* @name channelCountMode
* @readOnly
*/
readonly channelCountMode: string;
/**
* channelInterpretation determines how individual channels will be treated
* when up-mixing and down-mixing connections to any inputs to the node.
* The default value is "speakers".
* @memberof Tone.AudioNode#
* @type {String}
* @name channelInterpretation
* @readOnly
*/
readonly channelInterpretation: string;
/**
* The number of inputs feeding into the AudioNode.
* For source nodes, this will be 0.
* @type {Number}
* @name numberOfInputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfInputs: number;
/**
* The number of outputs coming out of the AudioNode.
* @type {Number}
* @name numberOfOutputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfOutputs: number;
/**
* connect the output of a ToneNode to an AudioParam, AudioNode, or ToneNode
* @param {Tone | AudioParam | AudioNode} unit
* @param {number} [outputNum=0] optionally which output to connect from
* @param {number} [inputNum=0] optionally which input to connect to
* @returns {Tone.AudioNode} this
*/
connect(unit: Tone | AudioParam | AudioNode, outputNum?: number, inputNum?: number): Tone.AudioNode;
/**
* disconnect the output
* @param {Number|AudioNode} output Either the output index to disconnect
* if the output is an array, or the
* node to disconnect from.
* @returns {Tone.AudioNode} this
*/
disconnect(output: number | AudioNode): Tone.AudioNode;
/**
* Connect the output of this node to the rest of the nodes in series.
* @example
* //connect a node to an effect, panVol and then to the master output
* node.chain(effect, panVol, Tone.Master);
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
chain(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* connect the output of this node to the rest of the nodes in parallel.
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
fan(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* Connect 'this' to the master output. Shorthand for this.connect(Tone.Master)
* @returns {Tone.AudioNode} this
* @example
* //connect an oscillator to the master output
* var osc = new Tone.Oscillator().toMaster();
*/
toMaster(): Tone.AudioNode;
/**
* Send this signal to the channel name.
* @param {String} channelName A named channel to send the signal to.
* @param {Decibels} amount The amount of the source to send to the bus.
* @return {GainNode} The gain node which connects this node to the desired channel.
* Can be used to adjust the levels of the send.
* @example
* source.send("reverb", -12);
*/
send(channelName: string, amount: Decibels): GainNode;
/**
* Receive the input from the desired channelName to the input
*
* @param {String} channelName The named channel to receive the signal from.
* @param {Number=} channelNumber The channel to connect to
* @returns {Tone} this
* @example
* reverbEffect.receive("reverb");
*/
receive(channelName: string, channelNumber?: number): Tone;
/**
* Convert Time into seconds.
*
* Unlike the method which it overrides, this takes into account
* transport time and musical notation.
*
* Time : 1.40
* Notation: 4n or 1m or 2t
* Now Relative: +3n
*
* @param {Time} time
* @return {Seconds}
*/
toSeconds(time: Time): Seconds;
/**
* Convert a frequency representation into a number.
* @param {Frequency} freq
* @return {Hertz} the frequency in hertz
*/
toFrequency(freq: Frequency): Hertz;
/**
* Convert a time representation into ticks.
* @param {Time} time
* @return {Ticks} the time in ticks
*/
toTicks(time: Time): Ticks;
}
/**
* @class Tone.Compressor is a thin wrapper around the Web Audio
* [DynamicsCompressorNode](http://webaudio.github.io/web-audio-api/#the-dynamicscompressornode-interface).
* Compression reduces the volume of loud sounds or amplifies quiet sounds
* by narrowing or "compressing" an audio signal's dynamic range.
* Read more on [Wikipedia](https://en.wikipedia.org/wiki/Dynamic_range_compression).
*
* @extends {Tone.AudioNode}
* @constructor
* @param {Decibels|Object} [threshold] The value above which the compression starts to be applied.
* @param {Positive} [ratio] The gain reduction ratio.
* @example
* var comp = new Tone.Compressor(-30, 3);
*/
class Compressor extends Tone.AudioNode {
constructor(threshold?: Decibels | any, ratio?: Positive);
/**
* The threshold value, i.e. the level above which compression
* starts to be applied.
* @type {Decibels}
* @signal
*/
threshold: Decibels;
/**
* The attack parameter
* @type {Time}
* @signal
*/
attack: Time;
/**
* The release parameter
* @type {Time}
* @signal
*/
release: Time;
/**
* The knee parameter
* @type {Decibels}
* @signal
*/
knee: Decibels;
/**
* The ratio value (gain reduction ratio)
* @type {Number}
* @signal
*/
ratio: number;
/**
* The default values.
* @static
* @const
* @type {Object}
*/
static readonly defaults: any;
/**
* clean up
* @returns {Tone.Compressor} this
*/
dispose(): Tone.Compressor;
/**
* Get the audio context belonging to this instance.
* @type {Tone.Context}
* @memberOf Tone.AudioNode#
* @name context
* @readOnly
*/
readonly context: Tone.Context;
/**
* channelCount is the number of channels used when up-mixing and down-mixing
* connections to any inputs to the node. The default value is 2 except for
* specific nodes where its value is specially determined.
*
* @memberof Tone.AudioNode#
* @type {Number}
* @name channelCount
* @readOnly
*/
readonly channelCount: number;
/**
* channelCountMode determines how channels will be counted when up-mixing and
* down-mixing connections to any inputs to the node.
* The default value is "max". This attribute has no effect for nodes with no inputs.
* @memberof Tone.AudioNode#
* @type {String}
* @name channelCountMode
* @readOnly
*/
readonly channelCountMode: string;
/**
* channelInterpretation determines how individual channels will be treated
* when up-mixing and down-mixing connections to any inputs to the node.
* The default value is "speakers".
* @memberof Tone.AudioNode#
* @type {String}
* @name channelInterpretation
* @readOnly
*/
readonly channelInterpretation: string;
/**
* The number of inputs feeding into the AudioNode.
* For source nodes, this will be 0.
* @type {Number}
* @name numberOfInputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfInputs: number;
/**
* The number of outputs coming out of the AudioNode.
* @type {Number}
* @name numberOfOutputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfOutputs: number;
/**
* connect the output of a ToneNode to an AudioParam, AudioNode, or ToneNode
* @param {Tone | AudioParam | AudioNode} unit
* @param {number} [outputNum=0] optionally which output to connect from
* @param {number} [inputNum=0] optionally which input to connect to
* @returns {Tone.AudioNode} this
*/
connect(unit: Tone | AudioParam | AudioNode, outputNum?: number, inputNum?: number): Tone.AudioNode;
/**
* disconnect the output
* @param {Number|AudioNode} output Either the output index to disconnect
* if the output is an array, or the
* node to disconnect from.
* @returns {Tone.AudioNode} this
*/
disconnect(output: number | AudioNode): Tone.AudioNode;
/**
* Connect the output of this node to the rest of the nodes in series.
* @example
* //connect a node to an effect, panVol and then to the master output
* node.chain(effect, panVol, Tone.Master);
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
chain(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* connect the output of this node to the rest of the nodes in parallel.
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
fan(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* Connect 'this' to the master output. Shorthand for this.connect(Tone.Master)
* @returns {Tone.AudioNode} this
* @example
* //connect an oscillator to the master output
* var osc = new Tone.Oscillator().toMaster();
*/
toMaster(): Tone.AudioNode;
/**
* Send this signal to the channel name.
* @param {String} channelName A named channel to send the signal to.
* @param {Decibels} amount The amount of the source to send to the bus.
* @return {GainNode} The gain node which connects this node to the desired channel.
* Can be used to adjust the levels of the send.
* @example
* source.send("reverb", -12);
*/
send(channelName: string, amount: Decibels): GainNode;
/**
* Receive the input from the desired channelName to the input
*
* @param {String} channelName The named channel to receive the signal from.
* @param {Number=} channelNumber The channel to connect to
* @returns {Tone} this
* @example
* reverbEffect.receive("reverb");
*/
receive(channelName: string, channelNumber?: number): Tone;
/**
* Convert Time into seconds.
*
* Unlike the method which it overrides, this takes into account
* transport time and musical notation.
*
* Time : 1.40
* Notation: 4n or 1m or 2t
* Now Relative: +3n
*
* @param {Time} time
* @return {Seconds}
*/
toSeconds(time: Time): Seconds;
/**
* Convert a frequency representation into a number.
* @param {Frequency} freq
* @return {Hertz} the frequency in hertz
*/
toFrequency(freq: Frequency): Hertz;
/**
* Convert a time representation into ticks.
* @param {Time} time
* @return {Ticks} the time in ticks
*/
toTicks(time: Time): Ticks;
}
/**
* @class Tone.Crossfade provides equal power fading between two inputs.
* More on crossfading technique [here](https://en.wikipedia.org/wiki/Fade_(audio_engineering)#Crossfading).
*
* @constructor
* @extends {Tone.AudioNode}
* @param {NormalRange} [initialFade=0.5]
* @example
* var crossFade = new Tone.CrossFade(0.5);
* //connect effect A to crossfade from
* //effect output 0 to crossfade input 0
* effectA.connect(crossFade, 0, 0);
* //connect effect B to crossfade from
* //effect output 0 to crossfade input 1
* effectB.connect(crossFade, 0, 1);
* crossFade.fade.value = 0;
* // ^ only effectA is output
* crossFade.fade.value = 1;
* // ^ only effectB is output
* crossFade.fade.value = 0.5;
* // ^ the two signals are mixed equally.
*/
class CrossFade extends Tone.AudioNode {
constructor(initialFade?: NormalRange);
/**
* The first input. Alias for <code>input[0]</code>.
* @type {Tone.Gain}
*/
a: Tone.Gain;
/**
* The second input. Alias for <code>input[1]</code>.
* @type {Tone.Gain}
*/
b: Tone.Gain;
/**
* The mix between the two inputs. A fade value of 0
* will output 100% <code>input[0]</code> and
* a value of 1 will output 100% <code>input[1]</code>.
* @type {NormalRange}
* @signal
*/
fade: NormalRange;
/**
* clean up
* @returns {Tone.CrossFade} this
*/
dispose(): Tone.CrossFade;
/**
* Get the audio context belonging to this instance.
* @type {Tone.Context}
* @memberOf Tone.AudioNode#
* @name context
* @readOnly
*/
readonly context: Tone.Context;
/**
* channelCount is the number of channels used when up-mixing and down-mixing
* connections to any inputs to the node. The default value is 2 except for
* specific nodes where its value is specially determined.
*
* @memberof Tone.AudioNode#
* @type {Number}
* @name channelCount
* @readOnly
*/
readonly channelCount: number;
/**
* channelCountMode determines how channels will be counted when up-mixing and
* down-mixing connections to any inputs to the node.
* The default value is "max". This attribute has no effect for nodes with no inputs.
* @memberof Tone.AudioNode#
* @type {String}
* @name channelCountMode
* @readOnly
*/
readonly channelCountMode: string;
/**
* channelInterpretation determines how individual channels will be treated
* when up-mixing and down-mixing connections to any inputs to the node.
* The default value is "speakers".
* @memberof Tone.AudioNode#
* @type {String}
* @name channelInterpretation
* @readOnly
*/
readonly channelInterpretation: string;
/**
* The number of inputs feeding into the AudioNode.
* For source nodes, this will be 0.
* @type {Number}
* @name numberOfInputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfInputs: number;
/**
* The number of outputs coming out of the AudioNode.
* @type {Number}
* @name numberOfOutputs
* @memberof Tone.AudioNode#
* @readOnly
*/
readonly numberOfOutputs: number;
/**
* connect the output of a ToneNode to an AudioParam, AudioNode, or ToneNode
* @param {Tone | AudioParam | AudioNode} unit
* @param {number} [outputNum=0] optionally which output to connect from
* @param {number} [inputNum=0] optionally which input to connect to
* @returns {Tone.AudioNode} this
*/
connect(unit: Tone | AudioParam | AudioNode, outputNum?: number, inputNum?: number): Tone.AudioNode;
/**
* disconnect the output
* @param {Number|AudioNode} output Either the output index to disconnect
* if the output is an array, or the
* node to disconnect from.
* @returns {Tone.AudioNode} this
*/
disconnect(output: number | AudioNode): Tone.AudioNode;
/**
* Connect the output of this node to the rest of the nodes in series.
* @example
* //connect a node to an effect, panVol and then to the master output
* node.chain(effect, panVol, Tone.Master);
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
chain(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* connect the output of this node to the rest of the nodes in parallel.
* @param {...(AudioParam|Tone|AudioNode)} nodes
* @returns {Tone.AudioNode} this
*/
fan(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
* Connect 'this' to the master output. Shorthand for this.connect(Tone.Master)
* @returns {Tone.AudioNode} this
* @example
* //connect an oscillator to the master output
* var osc = new Tone.Oscillator().toMaster();
*/
toMaster(): Tone.AudioNode;
/**
* Send this signal to the channel name.
* @param {String} channelName A named channel to send the signal to.
* @param {Decibels} amount The amount of the source to send to the bus.
* @return {GainNode} The gain node which connects this node to the desired channel.
* Can be used to adjust the levels of the send.
* @example
* source.send("reverb", -12);
*/
send(channelName: string, amount: Decibels): GainNode;
/**
* Receive the input from the desired channelName to the input
*
* @param {String} channelName The named channel to receive the signal from.
* @param {Number=} channelNumber The channel to connect to
* @returns {Tone} this
* @example
* reverbEffect.receive("reverb");
*/
receive(channelName: string, channelNumber?: number): Tone;
/**
* Convert Time into seconds.
*
* Unlike the method which it overrides, this takes into account
* transport time and musical notation.
*
* Time : 1.40
* Notation: 4n or 1m or 2t
* Now Relative: +3n
*
* @param {Time} time
* @return {Seconds}
*/
toSeconds(time: Time): Seconds;
/**
* Convert a frequency representation into a number.
* @param {Frequency} freq
* @return {Hertz} the frequency in hertz
*/
toFrequency(freq: Frequency): Hertz;
/**
* Convert a time representation into ticks.
* @param {Time} time
* @return {Ticks} the time in ticks
*/
toTicks(time: Time): Ticks;
}
/**
 * @class Tone.EQ3 is a three band EQ with control over low, mid, and high gain as
 * well as the low and high crossover frequencies.
 *
 * @constructor
 * @extends {Tone.AudioNode}
 *
 * @param {Decibels|Object} [lowLevel] The gain applied to the lows.
 * @param {Decibels} [midLevel] The gain applied to the mid.
 * @param {Decibels} [highLevel] The gain applied to the high.
 * @example
 * var eq = new Tone.EQ3(-10, 3, -20);
 */
class EQ3 extends Tone.AudioNode {
constructor(lowLevel?: Decibels | any, midLevel?: Decibels, highLevel?: Decibels);
/**
 * The gain in decibels of the low part.
 * @type {Decibels}
 * @signal
 */
low: Decibels;
/**
 * The gain in decibels of the mid part.
 * @type {Decibels}
 * @signal
 */
mid: Decibels;
/**
 * The gain in decibels of the high part.
 * @type {Decibels}
 * @signal
 */
high: Decibels;
/**
 * The Q value for all of the filters.
 * @type {Positive}
 * @signal
 */
Q: Positive;
/**
 * The low/mid crossover frequency.
 * @type {Frequency}
 * @signal
 */
lowFrequency: Frequency;
/**
 * The mid/high crossover frequency.
 * @type {Frequency}
 * @signal
 */
highFrequency: Frequency;
/**
 * The default parameter values.
 */
static defaults: any;
/**
 * Clean up.
 * @returns {Tone.EQ3} this
 */
dispose(): Tone.EQ3;
/**
 * Get the audio context belonging to this instance.
 * @type {Tone.Context}
 * @memberOf Tone.AudioNode#
 * @name context
 * @readOnly
 */
readonly context: Tone.Context;
/**
 * channelCount is the number of channels used when up-mixing and down-mixing
 * connections to any inputs to the node. The default value is 2 except for
 * specific nodes where its value is specially determined.
 *
 * @memberof Tone.AudioNode#
 * @type {Number}
 * @name channelCount
 * @readOnly
 */
readonly channelCount: number;
/**
 * channelCountMode determines how channels will be counted when up-mixing and
 * down-mixing connections to any inputs to the node.
 * The default value is "max". This attribute has no effect for nodes with no inputs.
 * @memberof Tone.AudioNode#
 * @type {String}
 * @name channelCountMode
 * @readOnly
 */
readonly channelCountMode: string;
/**
 * channelInterpretation determines how individual channels will be treated
 * when up-mixing and down-mixing connections to any inputs to the node.
 * The default value is "speakers".
 * @memberof Tone.AudioNode#
 * @type {String}
 * @name channelInterpretation
 * @readOnly
 */
readonly channelInterpretation: string;
/**
 * The number of inputs feeding into the AudioNode.
 * For source nodes, this will be 0.
 * @type {Number}
 * @name numberOfInputs
 * @memberof Tone.AudioNode#
 * @readOnly
 */
readonly numberOfInputs: number;
/**
 * The number of outputs coming out of the AudioNode.
 * @type {Number}
 * @name numberOfOutputs
 * @memberof Tone.AudioNode#
 * @readOnly
 */
readonly numberOfOutputs: number;
/**
 * Connect the output of a ToneNode to an AudioParam, AudioNode, or ToneNode.
 * @param {Tone | AudioParam | AudioNode} unit
 * @param {number} [outputNum=0] Optionally which output to connect from.
 * @param {number} [inputNum=0] Optionally which input to connect to.
 * @returns {Tone.AudioNode} this
 */
connect(unit: Tone | AudioParam | AudioNode, outputNum?: number, inputNum?: number): Tone.AudioNode;
/**
 * Disconnect the output.
 * @param {Number|AudioNode} output Either the output index to disconnect
 * if the output is an array, or the
 * node to disconnect from.
 * @returns {Tone.AudioNode} this
 */
disconnect(output: number | AudioNode): Tone.AudioNode;
/**
 * Connect the output of this node to the rest of the nodes in series.
 * @example
 * //connect a node to an effect, panVol and then to the master output
 * node.chain(effect, panVol, Tone.Master);
 * @param {...(AudioParam|Tone|AudioNode)} nodes
 * @returns {Tone.AudioNode} this
 */
chain(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
 * Connect the output of this node to the rest of the nodes in parallel.
 * @param {...(AudioParam|Tone|AudioNode)} nodes
 * @returns {Tone.AudioNode} this
 */
fan(...nodes: (AudioParam | Tone | AudioNode)[]): Tone.AudioNode;
/**
 * Connect 'this' to the master output. Shorthand for this.connect(Tone.Master).
 * @returns {Tone.AudioNode} this
 * @example
 * //connect an oscillator to the master output
 * var osc = new Tone.Oscillator().toMaster();
 */
toMaster(): Tone.AudioNode;
/**
 * Send this signal to the channel name.
 * @param {String} channelName A named channel to send the signal to.
 * @param {Decibels} amount The amount of the source to send to the bus.
 * @return {GainNode} The gain node which connects this node to the desired channel.
 * The returned node can be used to adjust the level of the send after creation.
 * @example
 * source.send("reverb", -12);
 */
send(channelName: string, amount: Decibels): GainNode;
/**
 * Receive the signal from the given channelName into this node's input.
 *
 * @param {String} channelName A named channel to receive the signal from.
 * @param {Number=} channelNumber The input channel to connect to.
 * @returns {Tone} this
 * @example
 * reverbEffect.receive("reverb");
 */
receive(channelName: string, channelNumber?: number): Tone;
/**
 * Convert Time into seconds.
 *
 * Unlike the method which it overrides, this takes into account
 * transport time and musical notation.
 *
 * Time : 1.40
 * Notation: 4n or 1m or 2t
 * Now Relative: +3n
 *
 * @param {Time} time
 * @return {Seconds}
 */
toSeconds(time: Time): Seconds;
/**
 * Convert a frequency representation into a number.
 * @param {Frequency} freq
 * @return {Hertz} The frequency in hertz.
 */
toFrequency(freq: Frequency): Hertz;
/**
 * Convert a time representation into ticks.
 * @param {Time} time
 * @return {Ticks} The time in ticks.
 */
toTicks(time: Time): Ticks;
}
/**
* @class Tone.Envelope is an [ADSR](https://en.wikipedia.org/wiki/Synthesizer#ADSR_envelope)
* envelope generator. Tone.Envelope outputs a signal which
* can be connected to an AudioParam or Tone.Signal.
* <img src="https://upload.wikimedia.org/wikipedia/commons/e/ea/ADSR_parameter.svg">
*
* @constructor
* @extends {Tone.AudioNode}
* @param {Time} [attack] The amount of time it takes for the envelope to go from
* 0 to it's maximum value.
* @param {Time} [decay] The period of time after the attack that it takes for the envelope
* to fall