molstar
A comprehensive macromolecular library.
282 lines • 15.9 kB
JavaScript
"use strict";
/**
* Copyright (c) 2019 mol* contributors, licensed under MIT, See LICENSE file for more info.
*
* @author Alexander Rose <alexander.rose@weirdbyte.de>
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.parsePsf = void 0;
var tslib_1 = require("tslib");
var mol_task_1 = require("../../../mol-task");
var tokenizer_1 = require("../common/text/tokenizer");
var result_1 = require("../result");
var token_1 = require("../common/text/column/token");
var db_1 = require("../../../mol-data/db");
var readLine = tokenizer_1.Tokenizer.readLine, skipWhitespace = tokenizer_1.Tokenizer.skipWhitespace, eatValue = tokenizer_1.Tokenizer.eatValue, eatLine = tokenizer_1.Tokenizer.eatLine, markStart = tokenizer_1.Tokenizer.markStart;
var reWhitespace = /\s+/;
var reTitle = /(^\*|REMARK)*/;
function State(tokenizer, runtimeCtx) {
return {
tokenizer: tokenizer,
runtimeCtx: runtimeCtx,
};
}
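// Parses the !NATOM section: tokenizes `count` atom records into per-column token
// ranges and exposes them as lazily parsed columns. LAMMPS "full" style files lack
// segment and residue name columns, so the residue id tokens are reused for those.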
function handleAtoms(state, count) {
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
var tokenizer, atomId, segmentName, residueId, residueName, atomName, atomType, charge, mass, position, line, isLammpsFull, n, length, linesAlreadyRead;
return (0, tslib_1.__generator)(this, function (_a) {
switch (_a.label) {
case 0:
tokenizer = state.tokenizer;
atomId = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
segmentName = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
residueId = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
residueName = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
atomName = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
atomType = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
charge = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
mass = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
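// Peek at the first atom record to detect the column layout, then restore the
// tokenizer position so the chunked loop below re-reads it. A line with 7
// whitespace-separated fields is treated as LAMMPS "full" style and 6 columns are
// tokenized per atom (any trailing field is skipped by eatLine); otherwise the
// standard 8-column PSF layout is read.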
position = tokenizer.position;
line = readLine(tokenizer).trim();
tokenizer.position = position;
isLammpsFull = line.split(reWhitespace).length === 7;
n = isLammpsFull ? 6 : 8;
length = tokenizer.length;
linesAlreadyRead = 0;
return [4 /*yield*/, (0, mol_task_1.chunkedSubtask)(state.runtimeCtx, 100000, void 0, function (chunkSize) {
var linesToRead = Math.min(count - linesAlreadyRead, chunkSize);
for (var i = 0; i < linesToRead; ++i) {
for (var j = 0; j < n; ++j) {
skipWhitespace(tokenizer);
markStart(tokenizer);
eatValue(tokenizer);
if (isLammpsFull) {
switch (j) {
case 0:
tokenizer_1.TokenBuilder.addUnchecked(atomId, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 1:
tokenizer_1.TokenBuilder.addUnchecked(residueId, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 2:
tokenizer_1.TokenBuilder.addUnchecked(atomName, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 3:
tokenizer_1.TokenBuilder.addUnchecked(atomType, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 4:
tokenizer_1.TokenBuilder.addUnchecked(charge, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 5:
tokenizer_1.TokenBuilder.addUnchecked(mass, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
}
}
else {
switch (j) {
case 0:
tokenizer_1.TokenBuilder.addUnchecked(atomId, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 1:
tokenizer_1.TokenBuilder.addUnchecked(segmentName, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 2:
tokenizer_1.TokenBuilder.addUnchecked(residueId, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 3:
tokenizer_1.TokenBuilder.addUnchecked(residueName, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 4:
tokenizer_1.TokenBuilder.addUnchecked(atomName, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 5:
tokenizer_1.TokenBuilder.addUnchecked(atomType, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 6:
tokenizer_1.TokenBuilder.addUnchecked(charge, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 7:
tokenizer_1.TokenBuilder.addUnchecked(mass, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
}
}
}
// ignore any extra columns
eatLine(tokenizer);
markStart(tokenizer);
}
linesAlreadyRead += linesToRead;
return linesToRead;
}, function (ctx) { return ctx.update({ message: 'Parsing...', current: tokenizer.position, max: length }); })];
case 1:
_a.sent();
return [2 /*return*/, {
count: count,
atomId: (0, token_1.TokenColumnProvider)(atomId)(db_1.Column.Schema.int),
segmentName: isLammpsFull
? (0, token_1.TokenColumnProvider)(residueId)(db_1.Column.Schema.str)
: (0, token_1.TokenColumnProvider)(segmentName)(db_1.Column.Schema.str),
residueId: (0, token_1.TokenColumnProvider)(residueId)(db_1.Column.Schema.int),
residueName: isLammpsFull
? (0, token_1.TokenColumnProvider)(residueId)(db_1.Column.Schema.str)
: (0, token_1.TokenColumnProvider)(residueName)(db_1.Column.Schema.str),
atomName: (0, token_1.TokenColumnProvider)(atomName)(db_1.Column.Schema.str),
atomType: (0, token_1.TokenColumnProvider)(atomType)(db_1.Column.Schema.str),
charge: (0, token_1.TokenColumnProvider)(charge)(db_1.Column.Schema.float),
mass: (0, token_1.TokenColumnProvider)(mass)(db_1.Column.Schema.float)
}];
}
});
});
}
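// Parses the !NBOND section: tokenizes `count` bonds as pairs of atom ids and
// returns them as two integer columns (atomIdA, atomIdB).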
function handleBonds(state, count) {
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
var tokenizer, atomIdA, atomIdB, length, bondsAlreadyRead;
return (0, tslib_1.__generator)(this, function (_a) {
switch (_a.label) {
case 0:
tokenizer = state.tokenizer;
atomIdA = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
atomIdB = tokenizer_1.TokenBuilder.create(tokenizer.data, count * 2);
length = tokenizer.length;
bondsAlreadyRead = 0;
return [4 /*yield*/, (0, mol_task_1.chunkedSubtask)(state.runtimeCtx, 10, void 0, function (chunkSize) {
var bondsToRead = Math.min(count - bondsAlreadyRead, chunkSize);
for (var i = 0; i < bondsToRead; ++i) {
for (var j = 0; j < 2; ++j) {
skipWhitespace(tokenizer);
markStart(tokenizer);
eatValue(tokenizer);
switch (j) {
case 0:
tokenizer_1.TokenBuilder.addUnchecked(atomIdA, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
case 1:
tokenizer_1.TokenBuilder.addUnchecked(atomIdB, tokenizer.tokenStart, tokenizer.tokenEnd);
break;
}
}
}
bondsAlreadyRead += bondsToRead;
return bondsToRead;
}, function (ctx) { return ctx.update({ message: 'Parsing...', current: tokenizer.position, max: length }); })];
case 1:
_a.sent();
return [2 /*return*/, {
count: count,
atomIdA: (0, token_1.TokenColumnProvider)(atomIdA)(db_1.Column.Schema.int),
atomIdB: (0, token_1.TokenColumnProvider)(atomIdB)(db_1.Column.Schema.int),
}];
}
});
});
}
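// Reads `count` title lines, stripping a leading '*' or REMARK marker from each.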
function parseTitle(state, count) {
var title = [];
for (var i = 0; i < count; ++i) {
var line = readLine(state.tokenizer);
title.push(line.replace(reTitle, '').trim());
}
return title;
}
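// Top-level parse: the first line is taken as the file id, then section headers are
// scanned. Only !NTITLE, !NATOM and !NBOND are currently handled; parsing stops after
// the bond section (see the TODO below covering the remaining sections).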
function parseInternal(data, ctx) {
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
var tokenizer, state, title, atoms, bonds, id, line, numTitle, numAtoms, numBonds, result;
return (0, tslib_1.__generator)(this, function (_a) {
switch (_a.label) {
case 0:
tokenizer = (0, tokenizer_1.Tokenizer)(data);
state = State(tokenizer, ctx);
title = undefined;
atoms = undefined;
bonds = undefined;
id = readLine(state.tokenizer).trim();
_a.label = 1;
case 1:
if (!(tokenizer.tokenEnd < tokenizer.length)) return [3 /*break*/, 8];
line = readLine(state.tokenizer).trim();
if (!line.includes('!NTITLE')) return [3 /*break*/, 2];
numTitle = parseInt(line.split(reWhitespace)[0]);
title = parseTitle(state, numTitle);
return [3 /*break*/, 7];
case 2:
if (!line.includes('!NATOM')) return [3 /*break*/, 4];
numAtoms = parseInt(line.split(reWhitespace)[0]);
return [4 /*yield*/, handleAtoms(state, numAtoms)];
case 3:
atoms = _a.sent();
return [3 /*break*/, 7];
case 4:
if (!line.includes('!NBOND')) return [3 /*break*/, 6];
numBonds = parseInt(line.split(reWhitespace)[0]);
return [4 /*yield*/, handleBonds(state, numBonds)];
case 5:
bonds = _a.sent();
return [3 /*break*/, 8]; // TODO: don't break when the below are implemented
case 6:
if (line.includes('!NTHETA')) {
// TODO
}
else if (line.includes('!NPHI')) {
// TODO
}
else if (line.includes('!NIMPHI')) {
// TODO
}
else if (line.includes('!NDON')) {
// TODO
}
else if (line.includes('!NACC')) {
// TODO
}
else if (line.includes('!NNB')) {
// TODO
}
else if (line.includes('!NGRP NST2')) {
// TODO
}
else if (line.includes('!MOLNT')) {
// TODO
}
else if (line.includes('!NUMLP NUMLPH')) {
// TODO
}
else if (line.includes('!NCRTERM')) {
// TODO
}
_a.label = 7;
case 7: return [3 /*break*/, 1];
case 8:
if (title === undefined) {
title = [];
}
if (atoms === undefined) {
return [2 /*return*/, result_1.ReaderResult.error('no atoms data')];
}
if (bonds === undefined) {
return [2 /*return*/, result_1.ReaderResult.error('no bonds data')];
}
result = {
id: id,
title: title,
atoms: atoms,
bonds: bonds
};
return [2 /*return*/, result_1.ReaderResult.success(result)];
}
});
});
}
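// Public entry point: wraps parseInternal in a mol-task Task named 'Parse PSF' so the
// work can be scheduled, report progress through the runtime context, and be awaited
// via run().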
function parsePsf(data) {
var _this = this;
return mol_task_1.Task.create('Parse PSF', function (ctx) { return (0, tslib_1.__awaiter)(_this, void 0, void 0, function () {
return (0, tslib_1.__generator)(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, parseInternal(data, ctx)];
case 1: return [2 /*return*/, _a.sent()];
}
});
}); });
}
exports.parsePsf = parsePsf;
//# sourceMappingURL=parser.js.map
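A minimal usage sketch (assumptions: the module is required from this file's location as ./parser, the PSF text has already been loaded into the string psfData, and the ReaderResult returned by the task exposes isError, message and result, as the result_1 calls above suggest):

var parser = require('./parser');
// parsePsf returns a mol-task Task; run() executes it and resolves to a ReaderResult.
parser.parsePsf(psfData).run().then(function (res) {
    if (res.isError) throw new Error(res.message);
    var psf = res.result;
    console.log(psf.id, psf.atoms.count + ' atoms', psf.bonds.count + ' bonds');
});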