/**
 * wi-import
 * Extract file from LAS/ASCII/CSV to JSON.
 */
let readline = require('line-by-line');
let fs = require('fs');
let firstline = require('firstline');
let hashDir = require('../hash-dir');
let config = require('../common-config');
/**
 * Append one sample (index/value pair) to a curve's in-memory buffer.
 * The remaining parameters (fileName, defaultNull, type) are accepted for the
 * callers' benefit but are not used here — the buffer is flushed to disk later.
 */
function writeFromCsv(buffer, fileName, value, index, defaultNull, type) {
    const sample = { index: index, value: value };
    buffer.data.push(sample);
}
/**
 * Split `str` on `delimiter` without breaking inside double-quoted sections.
 * Quoted runs are temporarily base64-encoded (the base64 alphabet never
 * contains the comma delimiter), the string is split, and each quoted word is
 * decoded back. The surrounding quote characters are preserved in the output.
 */
function customSplit(str, delimiter) {
    // Fast path: nothing quoted, a plain split is safe.
    if (!str.includes('"')) {
        return str.split(delimiter);
    }
    // Mask every quoted run so the delimiter cannot appear inside it.
    const masked = str.replace(/"[^"]+"/g, (quoted) => {
        const inner = quoted.replace(/"/g, '');
        return '"' + Buffer.from(inner).toString('base64') + '"';
    });
    // Split, then decode any word that carries quote marks.
    return masked.split(delimiter).map((word) =>
        word.includes('"')
            ? '"' + Buffer.from(word.replace(/"/g, ''), 'base64').toString() + '"'
            : word
    );
}
/**
 * Parse a CSV file into well/dataset/curve structures and write each curve's
 * samples to its own hashed data file under config.dataPath.
 *
 * Row protocol (driven by the line counter):
 *   count == 0 : header row — one accumulation buffer per field name;
 *   count == 1 : first data row — the dataset and its curves are created and
 *                an empty data file plus write stream is opened per curve;
 *   count >= 2 : data rows — column 0 is the depth/index, column i+1 holds
 *                the value for fieldsName[i].
 *
 * @param {string} inputURL   Path to the temporary CSV file; removed (best
 *                            effort) once reading finishes.
 * @param {object} importData Import descriptor: titleFields, units, unitDepth,
 *                            coreData flag, userInfo.username and well
 *                            ({name, dataset, STRT, STOP, STEP, NULL}).
 * @returns {Promise<Array>}  Resolves with [wellInfo] (wellInfo.datasets
 *                            filled, wellInfo.dataset removed); rejects on a
 *                            read error.
 */
function extractFromCSV(inputURL, importData) {
    return new Promise((resolve, reject) => {
        let rl = new readline(inputURL);
        let fieldsName = importData.titleFields;
        let filePathes = {};
        let BUFFERS = {};
        let count = 0;
        let datasets = {};
        let wellInfo = importData.well;
        let units = importData.units || [];
        // Without this handler the promise would hang forever when the input
        // file is missing or unreadable.
        rl.on('error', function (err) {
            reject(err);
        });
        rl.on('line', function (line) {
            line = line.trim();
            if (count == 0) {
                // Header row: allocate an empty buffer per curve name.
                fieldsName.forEach(function (fieldName) {
                    BUFFERS[fieldName] = {
                        count: 0,
                        data: ''
                    };
                });
            } else if (count == 1) {
                // First data row: create the dataset, its curves and the
                // per-curve output files/streams.
                let dataset = {
                    name: wellInfo.dataset,
                    curves: [],
                    top: wellInfo.STRT.value,
                    bottom: wellInfo.STOP.value,
                    step: wellInfo.STEP.value,
                    params: [],
                    unit: importData.unitDepth
                };
                datasets[dataset.name] = dataset;
                for (let i = 0; i < fieldsName.length; i++) {
                    let curve = {
                        name: fieldsName[i],
                        unit: units[i] || '',
                        datasetname: dataset.name,
                        wellname: wellInfo.name,
                        startDepth: wellInfo.STRT.value,
                        stopDepth: wellInfo.STOP.value,
                        step: wellInfo.STEP.value,
                        path: ''
                    };
                    // Hash over user + well + curve identity so every curve
                    // gets a stable, collision-free storage location.
                    const hashstr =
                        importData.userInfo.username +
                        wellInfo.name +
                        curve.datasetname +
                        curve.name +
                        curve.unit +
                        curve.step;
                    filePathes[curve.name] = hashDir.createPath(
                        config.dataPath,
                        hashstr,
                        curve.name + '.txt'
                    );
                    // The curve records its path relative to the data root.
                    curve.path = filePathes[curve.name].replace(
                        config.dataPath + '/',
                        ''
                    );
                    fs.writeFileSync(filePathes[curve.name], ''); // truncate any stale file
                    datasets[dataset.name].curves.push(curve);
                    BUFFERS[curve.name] = {
                        writeStream: fs.createWriteStream(filePathes[curve.name]),
                        data: []
                    };
                }
            } else {
                // Data row: buffer one sample per curve and infer curve type.
                line = customSplit(line, ',');
                fieldsName.forEach(function (fieldName, i) {
                    let format = datasets[wellInfo.dataset].curves[i].type;
                    // Only (re-)detect the type from non-null values; a curve
                    // flips to TEXT as soon as a non-numeric value appears.
                    if (
                        format != 'TEXT' &&
                        parseFloat(line[i + 1]) != parseFloat(wellInfo.NULL.value)
                    ) {
                        format = 'NUMBER';
                        if (line[i + 1] && isNaN(line[i + 1])) {
                            format = 'TEXT';
                        }
                        datasets[wellInfo.dataset].curves[i].type = format;
                        BUFFERS[fieldName].type = format;
                    }
                    // Core data keeps the depth column as the sample index;
                    // otherwise samples are numbered sequentially (count - 2).
                    writeFromCsv(
                        BUFFERS[fieldName],
                        filePathes[fieldName],
                        line[i + 1],
                        importData.coreData ? line[0] : count - 2,
                        wellInfo.NULL.value,
                        format
                    );
                });
            }
            count++;
        });
        rl.on('end', function () {
            // Flush every buffered curve to its file as "index value" lines,
            // rendering nulls/NULL-markers as '' (TEXT) or 'null' (NUMBER).
            if (fieldsName) {
                fieldsName.forEach(function (fieldName) {
                    let data = "";
                    BUFFERS[fieldName].data.forEach(x => {
                        if (!x.value || parseFloat(x.value) === parseFloat(wellInfo.NULL.value)) {
                            if (BUFFERS[fieldName].type == "TEXT") {
                                data += x.index + " " + "" + "\n";
                            } else {
                                data += x.index + " null" + "\n";
                            }
                        } else {
                            data += x.index + " " + x.value + "\n";
                        }
                    });
                    BUFFERS[fieldName].writeStream.write(data);
                    BUFFERS[fieldName].writeStream.end();
                });
            }
            let output = [];
            wellInfo.datasets = [];
            delete wellInfo.dataset;
            for (var datasetName in datasets) {
                wellInfo.datasets.push(datasets[datasetName]);
            }
            output.push(wellInfo);
            console.log('Read finished');
            // Best-effort cleanup of the temp upload: a failure here must not
            // surface as an uncaught exception after the read succeeded.
            try {
                fs.unlinkSync(inputURL);
            } catch (err) {
                console.log('Could not remove temp file:', inputURL, err);
            }
            resolve(output);
        });
    });
}
// Public API: the CSV extraction entry point used by the import pipeline.
module.exports.extractFromCSV = extractFromCSV;