I understand that sending a transaction proposal (like the code below)
return channel.sendTransactionProposal(request);
}).then((proposalResponse) => {
//need to decode results
});
returns a proposalResponse that is a ProposalResponseObject (https://fabric-sdk-node.github.io/global.html#ProposalResponseObject__anchor), where ProposalResponseObject[0] is an array of ProposalResponse objects.
I also understand that proposalResponse[0][i].response.payload is the return message of shim.Success.
But how do I decode proposalResponse[0][i].payload and proposalResponse[1]?
I tried taking a look at BlockDecoder (https://github.com/hyperledger/fabric-sdk-node/blob/release-1.1/fabric-client/lib/BlockDecoder.js) but I can't find the correct method to decode the items mentioned above.
I looked into the protobuf structure of the proposal response payload and reused code from BlockDecoder.js. In case anybody is interested:
var grpc = require('grpc');
var _responseProto = grpc.load(__dirname + '/node_modules/fabric-client/lib/protos/peer/proposal_response.proto').protos;
var _proposalProto = grpc.load(__dirname + '/node_modules/fabric-client/lib/protos/peer/proposal.proto').protos;
var _rwsetProto = grpc.load(__dirname + '/node_modules/fabric-client/lib/protos/ledger/rwset/rwset.proto').rwset;
var _kv_rwsetProto = grpc.load(__dirname + '/node_modules/fabric-client/lib/protos/ledger/rwset/kvrwset/kv_rwset.proto').kvrwset;
var _ccEventProto = grpc.load(__dirname + '/node_modules/fabric-client/lib/protos/peer/chaincode_event.proto').protos;
module.exports = function decodeProposalResponsePayload(proposal_response_payload_bytes) {
var proposal_response_payload = {};
var proto_proposal_response_payload = _responseProto.ProposalResponsePayload.decode(proposal_response_payload_bytes);
proposal_response_payload.proposal_hash = proto_proposal_response_payload.getProposalHash().toBuffer().toString('hex');
proposal_response_payload.extension = decodeChaincodeAction(proto_proposal_response_payload.getExtension());
return proposal_response_payload;
}
function decodeChaincodeAction(action_bytes) {
var chaincode_action = {};
var proto_chaincode_action = _proposalProto.ChaincodeAction.decode(action_bytes);
chaincode_action.results = decodeReadWriteSets(proto_chaincode_action.getResults());
chaincode_action.events = decodeChaincodeEvents(proto_chaincode_action.getEvents());
chaincode_action.response = decodeResponse(proto_chaincode_action.getResponse());
chaincode_action.chaincode_id = decodeChaincodeID(proto_chaincode_action.getChaincodeId());
return chaincode_action;
}
function decodeReadWriteSets(rw_sets_bytes) {
var proto_tx_read_write_set = _rwsetProto.TxReadWriteSet.decode(rw_sets_bytes);
var tx_read_write_set = {};
tx_read_write_set.data_model = proto_tx_read_write_set.getDataModel();
if (proto_tx_read_write_set.getDataModel() === _rwsetProto.TxReadWriteSet.DataModel.KV) {
tx_read_write_set.ns_rwset = [];
let proto_ns_rwset = proto_tx_read_write_set.getNsRwset();
for (let i in proto_ns_rwset) {
let kv_rw_set = {};
let proto_kv_rw_set = proto_ns_rwset[i];
kv_rw_set.namespace = proto_kv_rw_set.getNamespace();
kv_rw_set.rwset = decodeKVRWSet(proto_kv_rw_set.getRwset());
tx_read_write_set.ns_rwset.push(kv_rw_set);
}
} else {
// not able to decode this type of rw set, return the array of byte[]
tx_read_write_set.ns_rwset = proto_tx_read_write_set.getNsRwset();
}
return tx_read_write_set;
}
function decodeKVRWSet(kv_bytes) {
var proto_kv_rw_set = _kv_rwsetProto.KVRWSet.decode(kv_bytes);
var kv_rw_set = {};
// KV readwrite set has three arrays
kv_rw_set.reads = [];
kv_rw_set.range_queries_info = [];
kv_rw_set.writes = [];
// build reads
let reads = kv_rw_set.reads;
var proto_reads = proto_kv_rw_set.getReads();
for (let i in proto_reads) {
reads.push(decodeKVRead(proto_reads[i]));
}
// build range_queries_info
let range_queries_info = kv_rw_set.range_queries_info;
var proto_range_queries_info = proto_kv_rw_set.getRangeQueriesInfo();
for (let i in proto_range_queries_info) {
range_queries_info.push(decodeRangeQueryInfo(proto_range_queries_info[i]));
}
// build writes
let writes = kv_rw_set.writes;
var proto_writes = proto_kv_rw_set.getWrites();
for (let i in proto_writes) {
writes.push(decodeKVWrite(proto_writes[i]));
}
return kv_rw_set;
}
function decodeKVRead(proto_kv_read) {
let kv_read = {};
kv_read.key = proto_kv_read.getKey();
let proto_version = proto_kv_read.getVersion();
if (proto_version) {
kv_read.version = {};
kv_read.version.block_num = proto_version.getBlockNum().toString();
kv_read.version.tx_num = proto_version.getTxNum().toString();
} else {
kv_read.version = null;
}
return kv_read;
}
function decodeRangeQueryInfo(proto_range_query_info) {
let range_query_info = {};
range_query_info.start_key = proto_range_query_info.getStartKey();
range_query_info.end_key = proto_range_query_info.getEndKey();
range_query_info.itr_exhausted = proto_range_query_info.getItrExhausted();
// reads_info is one of QueryReads
let proto_raw_reads = proto_range_query_info.getRawReads();
if (proto_raw_reads) {
range_query_info.raw_reads = {};
range_query_info.raw_reads.kv_reads = [];
for (let i in proto_raw_reads.kv_reads) {
let kv_read = decodeKVRead(proto_raw_reads.kv_reads[i]);
range_query_info.raw_reads.kv_reads.push(kv_read);
}
}
// or QueryReadsMerkleSummary
let proto_reads_merkle_hashes = proto_range_query_info.getReadsMerkleHashes();
if (proto_reads_merkle_hashes) {
range_query_info.reads_merkle_hashes = {};
range_query_info.reads_merkle_hashes.max_degree = proto_reads_merkle_hashes.getMaxDegree();
range_query_info.reads_merkle_hashes.max_level = proto_reads_merkle_hashes.getMaxLevel();
range_query_info.reads_merkle_hashes.max_level_hashes = proto_reads_merkle_hashes.getMaxLevelHashes();
}
return range_query_info;
}
function decodeKVWrite(proto_kv_write) {
let kv_write = {};
kv_write.key = proto_kv_write.getKey();
kv_write.is_delete = proto_kv_write.getIsDelete();
kv_write.value = proto_kv_write.getValue().toBuffer().toString();
return kv_write;
}
function decodeChaincodeEvents(event_bytes) {
var events = {};
var proto_events = _ccEventProto.ChaincodeEvent.decode(event_bytes);
events.chaincode_id = proto_events.getChaincodeId();
events.tx_id = proto_events.getTxId();
events.event_name = proto_events.getEventName();
events.payload = proto_events.getPayload().toBuffer();
return events;
}
function decodeResponse(proto_response) {
if (!proto_response) return null;
var response = {};
response.status = proto_response.getStatus();
response.message = proto_response.getMessage();
response.payload = proto_response.getPayload().toBuffer().toString();
return response;
}
function decodeChaincodeID(proto_chaincode_id) {
var chaincode_id = {};
if(!proto_chaincode_id) {
console.log('decodeChaincodeID - no proto_chaincode_id found');
return chaincode_id;
}
chaincode_id.path = proto_chaincode_id.getPath();
chaincode_id.name = proto_chaincode_id.getName();
chaincode_id.version = proto_chaincode_id.getVersion();
return chaincode_id;
}
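For example, here is a rough usage sketch (my own, not from the SDK docs), assuming the module above is saved as decodePayload.js in the project root next to node_modules, which the proto paths above expect:
var decodeProposalResponsePayload = require('./decodePayload.js');
channel.sendTransactionProposal(request).then((proposalResponse) => {
    // proposalResponse[0] is the array of ProposalResponse objects
    proposalResponse[0].forEach((endorsement) => {
        var decoded = decodeProposalResponsePayload(endorsement.payload);
        // decoded.extension.response.payload is the chaincode return value,
        // decoded.extension.results holds the decoded read/write sets
        console.log(JSON.stringify(decoded, null, 2));
    });
});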
As far as I know, the ProposalResponse payload can be obtained from the TransactionActionInfo class, which returns a byte[] that can be decoded with a utility method.
void blockWalker(Channel channel) throws InvalidArgumentException, ProposalException, IOException {
try {
BlockchainInfo channelInfo = channel.queryBlockchainInfo();
for (long current = channelInfo.getHeight() - 1; current > -1; --current) {
BlockInfo returnedBlock = channel.queryBlockByNumber(current);
final long blockNumber = returnedBlock.getBlockNumber();
out("current block number %d has data hash: %s", blockNumber, Hex.encodeHexString(returnedBlock.getDataHash()));
out("current block number %d has previous hash id: %s", blockNumber, Hex.encodeHexString(returnedBlock.getPreviousHash()));
out("current block number %d has calculated block hash is %s", blockNumber, Hex.encodeHexString(SDKUtils.calculateBlockHash(blockNumber, returnedBlock.getPreviousHash(), returnedBlock.getDataHash())));
final int envelopeCount = returnedBlock.getEnvelopeCount();
assertEquals(1, envelopeCount);
out("current block number %d has %d envelope count:", blockNumber, returnedBlock.getEnvelopeCount());
int i = 0;
for (BlockInfo.EnvelopeInfo envelopeInfo : returnedBlock.getEnvelopeInfos()) {
++i;
out(" Transaction number %d has transaction id: %s", i, envelopeInfo.getTransactionID());
final String channelId = envelopeInfo.getChannelId();
out(" Transaction number %d has channel id: %s", i, channelId);
out(" Transaction number %d has epoch: %d", i, envelopeInfo.getEpoch());
out(" Transaction number %d has transaction timestamp: %tB %<te, %<tY %<tT %<Tp", i, envelopeInfo.getTimestamp());
out(" Transaction number %d has type id: %s", i, "" + envelopeInfo.getType());
if (envelopeInfo.getType() == TRANSACTION_ENVELOPE) {
BlockInfo.TransactionEnvelopeInfo transactionEnvelopeInfo = (BlockInfo.TransactionEnvelopeInfo) envelopeInfo;
out(" Transaction number %d has %d actions", i, transactionEnvelopeInfo.getTransactionActionInfoCount());
out(" Transaction number %d isValid %b", i, transactionEnvelopeInfo.isValid());
out(" Transaction number %d validation code %d", i, transactionEnvelopeInfo.getValidationCode());
int j = 0;
for (BlockInfo.TransactionEnvelopeInfo.TransactionActionInfo transactionActionInfo : transactionEnvelopeInfo.getTransactionActionInfos()) {
++j;
out(" Transaction action %d has response status %d", j, transactionActionInfo.getResponseStatus());
out(" Transaction action %d has response message bytes as string: %s", j,
printableString(new String(transactionActionInfo.getResponseMessageBytes(), "UTF-8")));
out(" Transaction action %d has %d endorsements", j, transactionActionInfo.getEndorsementsCount());
for (int n = 0; n < transactionActionInfo.getEndorsementsCount(); ++n) {
BlockInfo.EndorserInfo endorserInfo = transactionActionInfo.getEndorsementInfo(n);
out("Endorser %d signature: %s", n, Hex.encodeHexString(endorserInfo.getSignature()));
out("Endorser %d endorser: %s", n, new String(endorserInfo.getEndorser(), "UTF-8"));
}
out(" Transaction action %d has %d chaincode input arguments", j, transactionActionInfo.getChaincodeInputArgsCount());
for (int z = 0; z < transactionActionInfo.getChaincodeInputArgsCount(); ++z) {
out(" Transaction action %d has chaincode input argument %d is: %s", j, z,
printableString(new String(transactionActionInfo.getChaincodeInputArgs(z), "UTF-8")));
}
out(" Transaction action %d proposal response status: %d", j,
transactionActionInfo.getProposalResponseStatus());
out(" Transaction action %d proposal response payload: %s", j,
printableString(new String(transactionActionInfo.getProposalResponsePayload())));
TxReadWriteSetInfo rwsetInfo = transactionActionInfo.getTxReadWriteSet();
if (null != rwsetInfo) {
out(" Transaction action %d has %d name space read write sets", j, rwsetInfo.getNsRwsetCount());
for (TxReadWriteSetInfo.NsRwsetInfo nsRwsetInfo : rwsetInfo.getNsRwsetInfos()) {
final String namespace = nsRwsetInfo.getNaamespace();
KvRwset.KVRWSet rws = nsRwsetInfo.getRwset();
int rs = -1;
for (KvRwset.KVRead readList : rws.getReadsList()) {
rs++;
out(" Namespace %s read set %d key %s version [%d:%d]", namespace, rs, readList.getKey(),
readList.getVersion().getBlockNum(), readList.getVersion().getTxNum());
if ("bar".equals(channelId) && blockNumber == 2) {
if ("example_cc_go".equals(namespace)) {
if (rs == 0) {
assertEquals("a", readList.getKey());
assertEquals(1, readList.getVersion().getBlockNum());
assertEquals(0, readList.getVersion().getTxNum());
} else if (rs == 1) {
assertEquals("b", readList.getKey());
assertEquals(1, readList.getVersion().getBlockNum());
assertEquals(0, readList.getVersion().getTxNum());
} else {
fail(format("unexpected readset %d", rs));
}
TX_EXPECTED.remove("readset1");
}
}
}
rs = -1;
for (KvRwset.KVWrite writeList : rws.getWritesList()) {
rs++;
String valAsString = printableString(new String(writeList.getValue().toByteArray(), "UTF-8"));
out(" Namespace %s write set %d key %s has value '%s' ", namespace, rs,
writeList.getKey(),
valAsString);
if ("bar".equals(channelId) && blockNumber == 2) {
if (rs == 0) {
assertEquals("a", writeList.getKey());
assertEquals("400", valAsString);
} else if (rs == 1) {
assertEquals("b", writeList.getKey());
assertEquals("400", valAsString);
} else {
fail(format("unexpected writeset %d", rs));
}
TX_EXPECTED.remove("writeset1");
}
}
}
}
}
}
}
}
if (!TX_EXPECTED.isEmpty()) {
// fail(TX_EXPECTED.get(0));
}
} catch (InvalidProtocolBufferRuntimeException e) {
throw e.getCause();
}
}
The utility method printableString()
static String printableString(final String string) {
int maxLogStringLength = 10000;
if (string == null || string.length() == 0) {
return string;
}
String ret = string.replaceAll("[^\\p{Print}]", "\n");
ret = ret.substring(0, Math.min(ret.length(), maxLogStringLength)) + (ret.length() > maxLogStringLength ? "..." : "");
return ret;
}
Hello, I'm having a problem with BIP32.
I use a BIP32 library in Node.js and in Dart (Flutter).
In Dart (Flutter), BIP32 derivePath returns a different private key, but not for all mnemonics; only some mnemonics are incorrect.
Out of 1000 mnemonics, roughly 1-2 produce a different private key.
Below is the Dart derivePath.
What is incorrect in this code?
Please help.
BIP32 derivePath(String path) {
final regex = new RegExp(r"^(m\/)?(\d+'?\/)*\d+'?$");
if (!regex.hasMatch(path)) throw new ArgumentError("Expected BIP32 Path");
List<String> splitPath = path.split("/");
if (splitPath[0] == "m") {
if (parentFingerprint != 0) throw new ArgumentError("Expected master, got child");
splitPath = splitPath.sublist(1);
print("splitPath: "+ splitPath);
}
print("splitPath: "+ splitPath);
return splitPath.fold(this, (BIP32 prevHd,String indexStr) {
int index;
if (indexStr.substring(indexStr.length - 1) == "'") {
index = int.parse(indexStr.substring(0, indexStr.length - 1));
return prevHd.deriveHardened(index);
} else {
index = int.parse(indexStr);
return prevHd.derive(index);
}
});
}
BIP32 derive(int index) {
if (index > UINT32_MAX || index < 0) throw new ArgumentError("Expected UInt32");
final isHardened = index >= HIGHEST_BIT;
Uint8List data = new Uint8List(37);
if (isHardened) {
if (isNeutered()) {
throw new ArgumentError("Missing private key for hardened child key");
}
data[0] = 0x00;
data.setRange(1, 33, privateKey);
data.buffer.asByteData().setUint32(33, index);
} else {
data.setRange(0, 33, publicKey);
data.buffer.asByteData().setUint32(33, index);
}
final I = hmacSHA512(chainCode, data);
final IL = I.sublist(0, 32);
final IR = I.sublist(32);
if (!ecc.isPrivate(IL)) {
return derive(index + 1);
}
BIP32 hd;
if (!isNeutered()) {
final ki = ecc.privateAdd(privateKey, IL);
if (ki == null) return derive(index + 1);
hd = BIP32.fromPrivateKey(ki, IR, network);
} else {
final ki = ecc.pointAddScalar(publicKey, IL, true);
if (ki == null) return derive(index + 1);
hd = BIP32.fromPublicKey(ki, IR, network);
}
hd.depth = depth + 1;
hd.index = index;
hd.parentFingerprint = fingerprint.buffer.asByteData().getUint32(0);
return hd;
}
BIP32 deriveHardened(int index) {
if (index > UINT31_MAX || index < 0) throw new ArgumentError("Expected UInt31");
return this.derive(index + HIGHEST_BIT);
}
OK, I solved this problem. It occurs in the elliptic curve function when the resulting byte array is less than 32 bytes; just left-pad it with zeros to 32 bytes and it is solved!
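For anyone hitting the same thing, a minimal sketch of that left-padding (written in TypeScript here purely for illustration; the original code is Dart, and padTo32 is a name I made up):
// Left-pad a byte array with zeros so it is exactly 32 bytes long,
// which is what the curve math expects for private-key material.
function padTo32(bytes: Uint8Array): Uint8Array {
    if (bytes.length >= 32) return bytes;
    const padded = new Uint8Array(32);        // initialised to zeros
    padded.set(bytes, 32 - bytes.length);     // copy the value into the right-most positions
    return padded;
}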
I'm trying to verify a document from QLDB using Node.js. I have been following the Java verification example as closely as I can, but I'm unable to calculate the same digest as stored on the ledger.
This is the code I have come up with. I query the proof and the block hash from QLDB and then try to calculate the digest in the same way as the Java example. But after concatenating the two hashes and calculating the new hash of the result, I get the wrong output from crypto.createHash('sha256').update(c).digest("hex"). I have also tried using "base64" instead of "hex", with a different but still wrong result.
const rBlock = makeReader(res.Revision.IonText);
var block = [];
rBlock.next();
rBlock.stepIn();
rBlock.next();
while (rBlock.next() != null) {
if (rBlock.fieldName() == 'hash') {
block.push(Buffer.from(rBlock.byteValue()).toString('hex'));
}
}
console.log(block);
var proof = [];
const rProof = makeReader(res.Proof.IonText);
rProof.next();
rProof.stepIn();
while (rProof.next() != null) {
proof.push(Buffer.from(rProof.byteValue()).toString('hex'));
}
var ph = block[0];
var c;
for (var i = 0; i < proof.length; i++) {
console.log(proof[i])
for (var j = 0; j < ph.length; j++) {
if (parseInt(ph[j]) > parseInt(proof[i][j])){
c = ph + proof[i];
break;
}
if (parseInt(ph[j]) < parseInt(proof[i][j])){
c = proof[i] + ph;
break;
}
}
ph = crypto.createHash('sha256').update(c).digest("hex");
console.log(ph);
console.log();
}
I have figured it out. The problem was that I was converting the blobs to hex strings and hashing them instead of the raw values. For anyone wanting to verify data in Node, here is the bare solution:
ledgerInfo.getRevision(params).then(res => {
console.log(res);
const rBlock = makeReader(res.Revision.IonText);
var ph;
rBlock.next();
rBlock.stepIn();
rBlock.next();
while (rBlock.next() != null) {
if (rBlock.fieldName() == 'hash') {
ph = rBlock.byteValue()
}
}
var proof = [];
const rProof = makeReader(res.Proof.IonText);
rProof.next();
rProof.stepIn();
while (rProof.next() != null) {
proof.push(rProof.byteValue());
}
for (var i = 0; i < proof.length; i++) {
var c;
if (hashComparator(ph, proof[i]) < 0) {
c = concatTypedArrays(ph, proof[i]);
}
else {
c = concatTypedArrays(proof[i], ph);
}
var buff = crypto.createHash('sha256').update(c).digest("hex");
ph = Uint8Array.from(Buffer.from(buff, 'hex'));
}
console.log(Buffer.from(ph).toString('base64'));
}).catch(err => {
console.log(err, err.stack)
});
function concatTypedArrays(a, b) {
var c = new (a.constructor)(a.length + b.length);
c.set(a, 0);
c.set(b, a.length);
return c;
}
function hashComparator(h1, h2) {
for (var i = h1.length - 1; i >= 0; i--) {
var diff = (h1[i]<<24>>24) - (h2[i]<<24>>24);
if (diff != 0)
return diff;
}
return 0;
}
I am executing a large query, so my app is throwing a timeout error. Some threads suggested adding a command timeout, but after adding those lines it takes longer to get a response back. Any idea why, or what am I missing in my code?
public int CreateRecord(string theCommand, DataSet theInputData)
{
int functionReturnValue = 0;
int retVal = 0;
SqlParameter objSqlParameter = default(SqlParameter);
DataSet dsParameter = new DataSet();
int i = 0;
try
{
//Set the command text (stored procedure name or SQL statement).
mobj_SqlCommand.CommandTimeout = 120;
mobj_SqlCommand.CommandText = theCommand;
mobj_SqlCommand.CommandType = CommandType.StoredProcedure;
for (i = 0; i <= (theInputData.Tables.Count - 1); i++)
{
if (theInputData.Tables[i].Rows.Count > 0)
{
dsParameter.Tables.Add(theInputData.Tables[i].Copy());
}
}
objSqlParameter = new SqlParameter("#theXmlData", SqlDbType.Text);
objSqlParameter.Direction = ParameterDirection.Input;
objSqlParameter.Value = "<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>" + dsParameter.GetXml();
//Attach to the parameter to mobj_SqlCommand.
mobj_SqlCommand.Parameters.Add(objSqlParameter);
//Finally, execute the command.
retVal = (int)mobj_SqlCommand.ExecuteScalar();
//Detach the parameters from mobj_SqlCommand, so it can be used again.
mobj_SqlCommand.Parameters.Clear();
functionReturnValue = retVal;
}
catch (Exception ex)
{
throw new System.Exception(ex.Message);
}
finally
{
//Clean up the objects created in this object.
if (mobj_SqlConnection.State == ConnectionState.Open)
{
mobj_SqlConnection.Close();
mobj_SqlConnection.Dispose();
mobj_SqlConnection = null;
}
if ((mobj_SqlCommand != null))
{
mobj_SqlCommand.Dispose();
mobj_SqlCommand = null;
}
if ((mobj_SqlDataAdapter != null))
{
mobj_SqlDataAdapter.Dispose();
mobj_SqlDataAdapter = null;
}
if ((dsParameter != null))
{
dsParameter.Dispose();
dsParameter = null;
}
objSqlParameter = null;
}
return functionReturnValue;
}
I want to concatenate a bunch of different files of a single type into one large file. For example, many JavaScript files into one large file, many CSS files down to one, etc. I want to create a source map of the files pre-concatenation, but I do not know where to start. I am working in Node, but I am also open to solutions in other environments.
I know there are tools that can do this, but they seem to be on a language-by-language basis (uglifyjs, cssmin or whatever it's called these days), but I want a tool that is not language-specific.
Also, I would like to define how the files are bound. For example, in JavaScript I want to give each file its own closure with an IIFE, such as:
(function () {
// File
}());
I can also think of other wrappers I would like to implement for different files.
Here are my options as I see them right now. However, I don't know which is best or how to start any of them.
1. Find a module that does this (I'm working in a Node.js environment)
2. Create an algorithm with Mozilla's source-map module. For that I also see a couple of options:
2.1. Only map each line to its new line location
2.2. Map every single character to its new location
2.3. Map every word to its new location (this option seems way out of scope)
3. Don't even worry about source maps
What do you guys think about these options? I've already tried options 2.1 and 2.2 (a rough sketch of 2.1 is below), but the solution seemed way too complicated for a concatenation algorithm and it did not perform perfectly in the Google Chrome developer tools.
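Roughly what I mean by option 2.1, as a hand-wavy sketch with Mozilla's source-map module (the file names and the IIFE wrapper are just placeholders, and it only maps whole lines at column 0):
var fs = require('fs');
var SourceMapGenerator = require('source-map').SourceMapGenerator;
var files = ['a.js', 'b.js'];               // placeholder input files
var generator = new SourceMapGenerator({ file: 'bundle.js' });
var output = [];
var outLine = 1;                            // source map lines are 1-based
files.forEach(function (name) {
    output.push('(function () {');          // the IIFE wrapper adds one output line
    outLine++;
    fs.readFileSync(name, 'utf8').split('\n').forEach(function (line, i) {
        output.push(line);
        generator.addMapping({              // map the whole output line back to the original line
            source: name,
            original: { line: i + 1, column: 0 },
            generated: { line: outLine, column: 0 }
        });
        outLine++;
    });
    output.push('}());');
    outLine++;
});
output.push('//# sourceMappingURL=bundle.js.map');
fs.writeFileSync('bundle.js', output.join('\n'));
fs.writeFileSync('bundle.js.map', generator.toString());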
I implemented code without any dependencies like this:
export interface SourceMap {
version: number; // always 3
file?: string;
sourceRoot?: string;
sources: string[];
sourcesContent?: string[];
names?: string[];
mappings: string | Buffer;
}
const emptySourceMap: SourceMap = { version: 3, sources: [], mappings: new Buffer(0) }
var charToInteger = new Buffer(256);
var integerToChar = new Buffer(64);
charToInteger.fill(255);
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='.split('').forEach((char, i) => {
charToInteger[char.charCodeAt(0)] = i;
integerToChar[i] = char.charCodeAt(0);
});
class DynamicBuffer {
buffer: Buffer;
size: number;
constructor() {
this.buffer = new Buffer(512);
this.size = 0;
}
ensureCapacity(capacity: number) {
if (this.buffer.length >= capacity)
return;
let oldBuffer = this.buffer;
this.buffer = new Buffer(Math.max(oldBuffer.length * 2, capacity));
oldBuffer.copy(this.buffer);
}
addByte(b: number) {
this.ensureCapacity(this.size + 1);
this.buffer[this.size++] = b;
}
addVLQ(num: number) {
var clamped: number;
if (num < 0) {
num = (-num << 1) | 1;
} else {
num <<= 1;
}
do {
clamped = num & 31;
num >>= 5;
if (num > 0) {
clamped |= 32;
}
this.addByte(integerToChar[clamped]);
} while (num > 0);
}
addString(s: string) {
let l = Buffer.byteLength(s);
this.ensureCapacity(this.size + l);
this.buffer.write(s, this.size);
this.size += l;
}
addBuffer(b: Buffer) {
this.ensureCapacity(this.size + b.length);
b.copy(this.buffer, this.size);
this.size += b.length;
}
toBuffer(): Buffer {
return this.buffer.slice(0, this.size);
}
}
function countNL(b: Buffer): number {
let res = 0;
for (let i = 0; i < b.length; i++) {
if (b[i] === 10) res++;
}
return res;
}
export class SourceMapBuilder {
outputBuffer: DynamicBuffer;
sources: string[];
mappings: DynamicBuffer;
lastSourceIndex = 0;
lastSourceLine = 0;
lastSourceCol = 0;
constructor() {
this.outputBuffer = new DynamicBuffer();
this.mappings = new DynamicBuffer();
this.sources = [];
}
addLine(text: string) {
this.outputBuffer.addString(text);
this.outputBuffer.addByte(10);
this.mappings.addByte(59); // ;
}
addSource(content: Buffer, sourceMap?: SourceMap) {
if (sourceMap == null) sourceMap = emptySourceMap;
this.outputBuffer.addBuffer(content);
let sourceLines = countNL(content);
if (content.length > 0 && content[content.length - 1] !== 10) {
sourceLines++;
this.outputBuffer.addByte(10);
}
let sourceRemap = [];
sourceMap.sources.forEach((v) => {
let pos = this.sources.indexOf(v);
if (pos < 0) {
pos = this.sources.length;
this.sources.push(v);
}
sourceRemap.push(pos);
});
let lastOutputCol = 0;
let inputMappings = (typeof sourceMap.mappings === "string") ? new Buffer(<string>sourceMap.mappings) : <Buffer>sourceMap.mappings;
let outputLine = 0;
let ip = 0;
let inOutputCol = 0;
let inSourceIndex = 0;
let inSourceLine = 0;
let inSourceCol = 0;
let shift = 0;
let value = 0;
let valpos = 0;
const commit = () => {
if (valpos === 0) return;
this.mappings.addVLQ(inOutputCol - lastOutputCol);
lastOutputCol = inOutputCol;
if (valpos === 1) {
valpos = 0;
return;
}
let outSourceIndex = sourceRemap[inSourceIndex];
this.mappings.addVLQ(outSourceIndex - this.lastSourceIndex);
this.lastSourceIndex = outSourceIndex;
this.mappings.addVLQ(inSourceLine - this.lastSourceLine);
this.lastSourceLine = inSourceLine;
this.mappings.addVLQ(inSourceCol - this.lastSourceCol);
this.lastSourceCol = inSourceCol;
valpos = 0;
}
while (ip < inputMappings.length) {
let b = inputMappings[ip++];
if (b === 59) { // ;
commit();
this.mappings.addByte(59);
inOutputCol = 0;
lastOutputCol = 0;
outputLine++;
} else if (b === 44) { // ,
commit();
this.mappings.addByte(44);
} else {
b = charToInteger[b];
if (b === 255) throw new Error("Invalid sourceMap");
value += (b & 31) << shift;
if (b & 32) {
shift += 5;
} else {
let shouldNegate = value & 1;
value >>= 1;
if (shouldNegate) value = -value;
switch (valpos) {
case 0: inOutputCol += value; break;
case 1: inSourceIndex += value; break;
case 2: inSourceLine += value; break;
case 3: inSourceCol += value; break;
}
valpos++;
value = shift = 0;
}
}
}
commit();
while (outputLine < sourceLines) {
this.mappings.addByte(59);
outputLine++;
}
}
toContent(): Buffer {
return this.outputBuffer.toBuffer();
}
toSourceMap(sourceRoot?: string): Buffer {
return new Buffer(JSON.stringify({ version: 3, sourceRoot, sources: this.sources, mappings: this.mappings.toBuffer().toString() }));
}
}
I, at first, implemented "index map" from that spec, only to find out that it is not supported by any browser.
Another project that could be useful to look at is magic-string.
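A possible usage sketch of the SourceMapBuilder above for the IIFE-wrapped concatenation from the question (the file names are placeholders, I'm assuming the class lives in a local module called ./sourceMapBuilder, and each input file is expected to bring its own source map, e.g. one emitted by its compiler):
import * as fs from 'fs';
import { SourceMapBuilder, SourceMap } from './sourceMapBuilder';
const files = ['a.js', 'b.js'];               // placeholder inputs
const builder = new SourceMapBuilder();
for (const name of files) {
    builder.addLine('(function () {');        // wrapper lines get no original mapping
    // read the file's own source map; without one the file's lines are simply unmapped
    const map: SourceMap = JSON.parse(fs.readFileSync(name + '.map', 'utf8'));
    builder.addSource(fs.readFileSync(name), map);
    builder.addLine('}());');
}
fs.writeFileSync('bundle.js', builder.toContent());
fs.writeFileSync('bundle.js.map', builder.toSourceMap());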
I have an array of files like this:
string[] unZippedFiles;
The idea is that I want to parse these files in parallel. As they are parsed, a record gets placed in a ConcurrentBag. As records are being placed, I want to kick off the update function.
Here is what I am doing in my Main():
foreach(var file in unZippedFiles)
{ Parallel.Invoke
(
() => ImportFiles(file),
() => UpdateTest()
);
}
This is what the code of Update looks like.
static void UpdateTest( )
{
Console.WriteLine("Updating/Inserting merchant information.");
while (!merchCollection.IsEmpty || producingRecords )
{
merchant x;
if (merchCollection.TryTake(out x))
{
UPDATE_MERCHANT(x.m_id, x.mInfo, x.month, x.year);
}
}
}
This is what the import code looks like. It's pretty much a giant string parser.
System.IO.StreamReader SR = new System.IO.StreamReader(fileName);
long COUNTER = 0;
StringBuilder contents = new StringBuilder( );
string M_ID = "";
string BOF_DELIMITER = "%%MS_SKEY_0000_000_PDF:";
string EOF_DELIMITER = "%%EOF";
try
{
record_count = 0;
producingRecords = true;
for (COUNTER = 0; COUNTER <= SR.BaseStream.Length - 1; COUNTER++)
{
if (SR.EndOfStream)
{
break;
}
contents.AppendLine(Strings.Trim(SR.ReadLine()));
contents.AppendLine(System.Environment.NewLine);
//contents += Strings.Trim(SR.ReadLine());
//contents += Strings.Chr(10);
if (contents.ToString().IndexOf((EOF_DELIMITER)) > -1)
{
if (contents.ToString().StartsWith(BOF_DELIMITER) & contents.ToString().IndexOf(EOF_DELIMITER) > -1)
{
string data = contents.ToString();
M_ID = data.Substring(data.IndexOf("_M") + 2, data.Substring(data.IndexOf("_M") + 2).IndexOf("_"));
Console.WriteLine("Merchant: " + M_ID);
merchant newmerch;
newmerch.m_id = M_ID;
newmerch.mInfo = data.Substring(0, (data.IndexOf(EOF_DELIMITER) + 5));
newmerch.month = DateTime.Now.AddMonths(-1).Month;
newmerch.year = DateTime.Now.AddMonths(-1).Year;
//Update(newmerch);
merchCollection.Add(newmerch);
}
contents.Clear();
//GC.Collect();
}
}
SR.Close();
// UpdateTest();
}
catch (Exception ex)
{
producingRecords = false;
}
finally
{
producingRecords = false;
}
}
The problem I am having is that the Update runs once and then the ImportFiles function just takes over and does not yield to the update function. Any ideas on what I am doing wrong would be of great help.
Here's my stab at fixing your thread synchronisation. Note that I haven't changed any of the code from the functional standpoint (with the exception of taking out the catch - it's generally a bad idea; exceptions need to be propagated).
Forgive me if something doesn't compile - I'm writing this based on incomplete snippets.
Main
foreach(var file in unZippedFiles)
{
using (var merchCollection = new BlockingCollection<merchant>())
{
Parallel.Invoke
(
() => ImportFiles(file, merchCollection),
() => UpdateTest(merchCollection)
);
}
}
Update
private void UpdateTest(BlockingCollection<merchant> merchCollection)
{
Console.WriteLine("Updating/Inserting merchant information.");
foreach (merchant x in merchCollection.GetConsumingEnumerable())
{
UPDATE_MERCHANT(x.m_id, x.mInfo, x.month, x.year);
}
}
Import
Don't forget to pass in merchCollection as a parameter - it should not be static.
System.IO.StreamReader SR = new System.IO.StreamReader(fileName);
long COUNTER = 0;
StringBuilder contents = new StringBuilder( );
string M_ID = "";
string BOF_DELIMITER = "%%MS_SKEY_0000_000_PDF:";
string EOF_DELIMITER = "%%EOF";
try
{
record_count = 0;
for (COUNTER = 0; COUNTER <= SR.BaseStream.Length - 1; COUNTER++)
{
if (SR.EndOfStream)
{
break;
}
contents.AppendLine(Strings.Trim(SR.ReadLine()));
contents.AppendLine(System.Environment.NewLine);
//contents += Strings.Trim(SR.ReadLine());
//contents += Strings.Chr(10);
if (contents.ToString().IndexOf((EOF_DELIMITER)) > -1)
{
if (contents.ToString().StartsWith(BOF_DELIMITER) & contents.ToString().IndexOf(EOF_DELIMITER) > -1)
{
string data = contents.ToString();
M_ID = data.Substring(data.IndexOf("_M") + 2, data.Substring(data.IndexOf("_M") + 2).IndexOf("_"));
Console.WriteLine("Merchant: " + M_ID);
merchant newmerch;
newmerch.m_id = M_ID;
newmerch.mInfo = data.Substring(0, (data.IndexOf(EOF_DELIMITER) + 5));
newmerch.month = DateTime.Now.AddMonths(-1).Month;
newmerch.year = DateTime.Now.AddMonths(-1).Year;
//Update(newmerch);
merchCollection.Add(newmerch);
}
contents.Clear();
//GC.Collect();
}
}
SR.Close();
// UpdateTest();
}
finally
{
merchCollection.CompleteAdding();
}
}
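The key design change: GetConsumingEnumerable blocks while the collection is empty and completes once CompleteAdding is called in the finally block above, so the producingRecords flag and the busy-wait loop from the original UpdateTest are no longer needed.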