// parser-transform — Streaming+Async lexer and parser (test suite).
// Generated by CoffeeScript 2.3.2
(function() {
// A starter JSON lexer
var LongReadable, Stream, Tee, asyncjson_parser, expect, fs, json_lexer, json_parser;
// chai provides the BDD-style `expect` assertions used throughout the suite.
({expect} = require('chai'));
fs = require('fs');
// Grammar fixtures, read once at load time as UTF-8 text:
// a lexer spec (.l) and two parser specs (.y) for JSON.
json_lexer = fs.readFileSync('test/json.l', 'utf8');
json_parser = fs.readFileSync('test/json.y', 'utf8');
// Presumably an async/streaming variant of the JSON grammar, used by the
// third pipeline in the concurrency test below — TODO confirm against fixture.
asyncjson_parser = fs.readFileSync('test/json-async.y', 'utf8');
Stream = require('stream');
// Debug helper: an object-mode pass-through Transform that logs each chunk
// before forwarding it downstream unchanged. Splice it into a pipe chain to
// inspect what the lexer/parser stages emit.
var Tee = class Tee extends Stream.Transform {
  /**
   * @param {Object} [options] - standard Transform options; objectMode is
   *   forced on. The caller's object is copied, not mutated.
   */
  constructor(options = {}) {
    // Spread into a fresh object so constructing a Tee does not mutate the
    // caller-owned options object as a side effect.
    super({...options, objectMode: true});
  }

  // Log the chunk, then pass it through untouched.
  _transform(chunk, encoding, next) {
    console.log(chunk);
    this.push(chunk);
    next();
  }
};
// A Readable that synthesizes a JSON array on the fly: "[" followed by
// twenty `"yes",` elements, then `4`, then "]", then EOF. The later pushes
// are delayed via setTimeout so downstream consumers get a genuinely
// asynchronous, slow-arriving stream.
LongReadable = class LongReadable extends Stream.Readable {
  constructor(options) {
    super(options);
    // Monotonic call counter for _read; selects what to emit next.
    this.count = 0;
  }

  _read(size) {
    const step = this.count++;
    if (step === 0) {
      this.push('[');
    } else if (step === 21) {
      this.push('4');
    } else if (step === 22) {
      this.push(']');
    } else if (step === 23) {
      // Signal end-of-stream, slightly later than the element delay.
      setTimeout(() => this.push(null), 60);
    } else {
      // Steps 1..20: one delayed array element per _read call.
      setTimeout(() => this.push('"yes",'), 50);
    }
  }
};
describe('The module', function() {
  // API surface under test, loaded from the package root.
  var LexerParser, LexerTransform, Grammar, ParserTransform;
  ({LexerParser, LexerTransform, Grammar, ParserTransform} = require('..'));
  // Artifacts built by the first three cases and reused by the last one.
  let dfas = null;
  let grammar = null;
  let grammar2 = null;
  it('should build a lexer', function() {
    dfas = LexerParser.parse(json_lexer);
    return dfas;
  });
  it('should build a grammar', function() {
    grammar = Grammar.fromString(json_parser, {mode: 'LALR1'}, 'bnf');
    return grammar;
  });
  it('should build another grammar', function() {
    grammar2 = Grammar.fromString(asyncjson_parser, {mode: 'LALR1'}, 'bnf');
    return grammar2;
  });
  return it('should parse two concurrent streams', function(done) {
    // Three independent lexer→parser pipelines run concurrently; the test
    // completes only after every one of them has delivered its output.
    let remaining = 3;
    const finishOne = () => {
      if (--remaining === 0) {
        done();
      }
    };
    // Pipeline 1: this package's own manifest, read from disk.
    const pkg = fs.createReadStream('./package.json', 'utf8');
    pkg.setEncoding('utf8');
    const lexer1 = new LexerTransform(dfas);
    const parser1 = new ParserTransform(grammar);
    // Pipeline 2: a code-generated JSON array (see LongReadable above).
    const generated = new LongReadable();
    generated.setEncoding('utf8');
    const lexer2 = new LexerTransform(dfas);
    const parser2 = new ParserTransform(grammar);
    // Pipeline 3: row-oriented JSON parsed with the second grammar.
    const rows = fs.createReadStream('./test/rows.json', 'utf8');
    rows.setEncoding('utf8');
    const lexer3 = new LexerTransform(dfas);
    const parser3 = new ParserTransform(grammar2);
    pkg.pipe(lexer1).pipe(parser1).on('data', (data) => {
      expect(data).to.have.property('name', 'parser-transform');
      expect(data).to.have.property('scripts').with.property('test');
      finishOne();
    });
    // Splice `.pipe(new Tee)` into a chain to watch lexer/parser chunks.
    generated.pipe(lexer2).pipe(parser2).on('data', (data) => {
      expect(data).to.have.length(21);
      expect(data).to.have.property(0, 'yes');
      expect(data).to.have.property(1, 'yes');
      expect(data).to.have.property(20, 4);
      finishOne();
    });
    // Pipeline 3 emits one parsed document per row; wait for all three.
    let rowsLeft = 3;
    rows.pipe(lexer3).pipe(parser3).on('data', (data) => {
      expect(data).to.have.property('prefix').with.length(2);
      expect(data).to.have.property('prefix').with.property(0, 'rows');
      expect(data).to.have.property('value').with.property('_id');
      if (--rowsLeft === 0) {
        finishOne();
      }
    });
  });
});
}).call(this);