Append repeated text using sed or awk?

I have the following config file:
servers = (
    {
        host = "localhost";
        ...
        timeout = 5;
    },
    {
        host = "127.0.0.1";
        ...
        timeout = 0;
    },
    {
        host = "example.com";
        ...
        timeout = 99;
    }
);
I want to append an "index" setting at the end of each section so the config looks like this:
servers = (
    {
        host = "localhost";
        ...
        timeout = 5;
        index = 1;
    },
    {
        host = "127.0.0.1";
        ...
        timeout = 0;
        index = 2;
    },
    {
        host = "example.com";
        ...
        timeout = 99;
        index = 3;
    }
);
How can I do this with conventional Unix tools like sed or awk?

This adds a line with index = ... after each line that has timeout as the first word.
awk '1;$1=="timeout"{printf " index = %d;\n", ++i}' file

Related

Chrome extension replaces letters, but breaks certain pages

I used the TreeWalker from this post https://stackoverflow.com/a/37178130/7102491 as a template and modified it to skip 'script' elements, hoping to stop certain pages (like a Google search) from breaking, but to no avail. Does anyone know how I can change the code so it no longer breaks those pages? Thanks.
var replaceArry = [
    [/b/gi, 'better'],
    [/Terms of service/gi, 'Términos y condiciones'],
    [/Privacy policy/gi, 'Privacidad'],
    // etc.
];
var numTerms = replaceArry.length;
var txtWalker = document.createTreeWalker(
    document.body,
    NodeFilter.SHOW_TEXT,
    { acceptNode: function (node) {
        //-- Skip whitespace-only nodes
        if (node.nodeValue.trim() && node.parentNode.nodeName != 'SCRIPT')
            return NodeFilter.FILTER_ACCEPT;
        return NodeFilter.FILTER_SKIP;
      }
    },
    false
);
var txtNode = null;
while (txtNode = txtWalker.nextNode()) {
    var oldTxt = txtNode.nodeValue;
    for (var J = 0; J < numTerms; J++) {
        oldTxt = oldTxt.replace(replaceArry[J][0], replaceArry[J][1]);
    }
    txtNode.nodeValue = oldTxt;
}
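For what it's worth, a minimal sketch of a more defensive filter, assuming the goal is simply to leave script, style and editable content untouched; the element list here is just the usual suspects, not an exhaustive one:
var skipSelector = 'script, style, noscript, textarea, [contenteditable]';
var txtWalker = document.createTreeWalker(
    document.body,
    NodeFilter.SHOW_TEXT,
    { acceptNode: function (node) {
        // Skip whitespace-only nodes
        if (!node.nodeValue.trim())
            return NodeFilter.FILTER_REJECT;
        // Skip text inside script/style/editable containers, at any depth
        var el = node.parentElement;
        if (el && el.closest(skipSelector))
            return NodeFilter.FILTER_REJECT;
        return NodeFilter.FILTER_ACCEPT;
      }
    },
    false
);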

How to create sourcemaps for concatenated files

I want to concatenate a bunch of different files of a single type into one large file: for example, many JavaScript files into one large file, many CSS files into one, etc. I want to create a source map of the files pre-concatenation, but I do not know where to start. I am working in Node, but I am also open to solutions in other environments.
I know there are tools that can do this, but they seem to work on a language-by-language basis (uglifyjs, cssmin or whatever it's called these days), and I want a tool that is not language specific.
Also, I would like to define how the files are wrapped when they are joined. For example, in JavaScript I want to give each file its own closure with an IIFE, such as:
(function () {
    // File
}());
I can also think of other wrappers I would like to implement for different files.
Here are my options as I see them right now. However, I don't know which is best or how to start any of them.
1. Find a module that does this (I'm working in a Node.js environment).
2. Create an algorithm with Mozilla's source-map module (see the sketch just after this list). For that I also see a couple of options:
   2.1. Only map each line to its new line location.
   2.2. Map every single character to its new location.
   2.3. Map every word to its new location (this option seems way out of scope).
3. Don't even worry about source maps.
What do you guys think of these options? I've already tried options 2.1 and 2.2, but the solution seemed way too complicated for a concatenation algorithm and it did not perform perfectly in the Google Chrome browser tools.
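A minimal sketch of option 2 at line granularity (2.1), assuming the source-map package is installed; the input file names are hypothetical and each file gets the IIFE wrapper described above:
// npm install source-map
var fs = require('fs');
var SourceNode = require('source-map').SourceNode;

var inputs = ['a.js', 'b.js'];               // hypothetical input files
var root = new SourceNode(null, null, null);

inputs.forEach(function (file) {
    var node = new SourceNode(null, null, null);
    // Map every input line back to (line, column 0) of its original file.
    fs.readFileSync(file, 'utf8').split('\n').forEach(function (line, i) {
        node.add(new SourceNode(i + 1, 0, file, line + '\n'));
    });
    // Give each file its own IIFE wrapper.
    root.add('(function () {\n');
    root.add(node);
    root.add('}());\n');
});

var result = root.toStringWithSourceMap({ file: 'bundle.js' });
fs.writeFileSync('bundle.js', result.code + '//# sourceMappingURL=bundle.js.map\n');
fs.writeFileSync('bundle.js.map', result.map.toString());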
I implemented code without any dependencies like this:
export interface SourceMap {
    version: number; // always 3
    file?: string;
    sourceRoot?: string;
    sources: string[];
    sourcesContent?: string[];
    names?: string[];
    mappings: string | Buffer;
}
const emptySourceMap: SourceMap = { version: 3, sources: [], mappings: new Buffer(0) };
var charToInteger = new Buffer(256);
var integerToChar = new Buffer(64);
charToInteger.fill(255);
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='.split('').forEach((char, i) => {
    charToInteger[char.charCodeAt(0)] = i;
    integerToChar[i] = char.charCodeAt(0);
});
class DynamicBuffer {
    buffer: Buffer;
    size: number;
    constructor() {
        this.buffer = new Buffer(512);
        this.size = 0;
    }
    ensureCapacity(capacity: number) {
        if (this.buffer.length >= capacity)
            return;
        let oldBuffer = this.buffer;
        this.buffer = new Buffer(Math.max(oldBuffer.length * 2, capacity));
        oldBuffer.copy(this.buffer);
    }
    addByte(b: number) {
        this.ensureCapacity(this.size + 1);
        this.buffer[this.size++] = b;
    }
    // Append a number as a Base64 VLQ: the sign goes into the lowest bit,
    // then 5 data bits per digit, with bit 6 set on every digit except the last.
    addVLQ(num: number) {
        var clamped: number;
        if (num < 0) {
            num = (-num << 1) | 1;
        } else {
            num <<= 1;
        }
        do {
            clamped = num & 31;
            num >>= 5;
            if (num > 0) {
                clamped |= 32;
            }
            this.addByte(integerToChar[clamped]);
        } while (num > 0);
    }
    addString(s: string) {
        let l = Buffer.byteLength(s);
        this.ensureCapacity(this.size + l);
        this.buffer.write(s, this.size);
        this.size += l;
    }
    addBuffer(b: Buffer) {
        this.ensureCapacity(this.size + b.length);
        b.copy(this.buffer, this.size);
        this.size += b.length;
    }
    toBuffer(): Buffer {
        return this.buffer.slice(0, this.size);
    }
}
function countNL(b: Buffer): number {
    let res = 0;
    for (let i = 0; i < b.length; i++) {
        if (b[i] === 10) res++;
    }
    return res;
}
export class SourceMapBuilder {
    outputBuffer: DynamicBuffer;
    sources: string[];
    mappings: DynamicBuffer;
    lastSourceIndex = 0;
    lastSourceLine = 0;
    lastSourceCol = 0;
    constructor() {
        this.outputBuffer = new DynamicBuffer();
        this.mappings = new DynamicBuffer();
        this.sources = [];
    }
    addLine(text: string) {
        this.outputBuffer.addString(text);
        this.outputBuffer.addByte(10);
        this.mappings.addByte(59); // ;
    }
    addSource(content: Buffer, sourceMap?: SourceMap) {
        if (sourceMap == null) sourceMap = emptySourceMap;
        this.outputBuffer.addBuffer(content);
        let sourceLines = countNL(content);
        if (content.length > 0 && content[content.length - 1] !== 10) {
            sourceLines++;
            this.outputBuffer.addByte(10);
        }
        // Remap the incoming map's source indices onto this builder's source list.
        let sourceRemap = [];
        sourceMap.sources.forEach((v) => {
            let pos = this.sources.indexOf(v);
            if (pos < 0) {
                pos = this.sources.length;
                this.sources.push(v);
            }
            sourceRemap.push(pos);
        });
        let lastOutputCol = 0;
        let inputMappings = (typeof sourceMap.mappings === "string") ? new Buffer(<string>sourceMap.mappings) : <Buffer>sourceMap.mappings;
        let outputLine = 0;
        let ip = 0;
        let inOutputCol = 0;
        let inSourceIndex = 0;
        let inSourceLine = 0;
        let inSourceCol = 0;
        let shift = 0;
        let value = 0;
        let valpos = 0;
        // Re-encode the segment decoded so far, relative to this builder's running state.
        const commit = () => {
            if (valpos === 0) return;
            this.mappings.addVLQ(inOutputCol - lastOutputCol);
            lastOutputCol = inOutputCol;
            if (valpos === 1) {
                valpos = 0;
                return;
            }
            let outSourceIndex = sourceRemap[inSourceIndex];
            this.mappings.addVLQ(outSourceIndex - this.lastSourceIndex);
            this.lastSourceIndex = outSourceIndex;
            this.mappings.addVLQ(inSourceLine - this.lastSourceLine);
            this.lastSourceLine = inSourceLine;
            this.mappings.addVLQ(inSourceCol - this.lastSourceCol);
            this.lastSourceCol = inSourceCol;
            valpos = 0;
        };
        while (ip < inputMappings.length) {
            let b = inputMappings[ip++];
            if (b === 59) { // ;
                commit();
                this.mappings.addByte(59);
                inOutputCol = 0;
                lastOutputCol = 0;
                outputLine++;
            } else if (b === 44) { // ,
                commit();
                this.mappings.addByte(44);
            } else {
                b = charToInteger[b];
                if (b === 255) throw new Error("Invalid sourceMap");
                value += (b & 31) << shift;
                if (b & 32) {
                    shift += 5;
                } else {
                    let shouldNegate = value & 1;
                    value >>= 1;
                    if (shouldNegate) value = -value;
                    switch (valpos) {
                        case 0: inOutputCol += value; break;
                        case 1: inSourceIndex += value; break;
                        case 2: inSourceLine += value; break;
                        case 3: inSourceCol += value; break;
                    }
                    valpos++;
                    value = shift = 0;
                }
            }
        }
        commit();
        // Pad with ';' so the mappings stay in sync with the emitted output lines.
        while (outputLine < sourceLines) {
            this.mappings.addByte(59);
            outputLine++;
        }
    }
    toContent(): Buffer {
        return this.outputBuffer.toBuffer();
    }
    toSourceMap(sourceRoot?: string): Buffer {
        return new Buffer(JSON.stringify({ version: 3, sourceRoot, sources: this.sources, mappings: this.mappings.toBuffer().toString() }));
    }
}
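A rough usage sketch of the class above, assuming a compiled version of it is importable (the module path below is hypothetical) and that each input's content and optional map have already been read from disk:
var fs = require('fs');
var SourceMapBuilder = require('./sourceMapBuilder').SourceMapBuilder; // hypothetical compiled module path

var builder = new SourceMapBuilder();
builder.addLine('// bundle header');           // unmapped line
builder.addSource(fs.readFileSync('a.js'));    // no input map: its lines stay unmapped
builder.addSource(fs.readFileSync('b.js'),     // input map gets re-encoded into the bundle map
    JSON.parse(fs.readFileSync('b.js.map', 'utf8')));

fs.writeFileSync('bundle.js', builder.toContent());
fs.writeFileSync('bundle.js.map', builder.toSourceMap());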
At first I implemented the "index map" format from that spec, only to find out that it is not supported by any browser.
Another project that could be useful to look at is magic-string.
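For reference, a minimal sketch of the magic-string route (its Bundle API keeps a per-file MagicString and generates the combined map itself), assuming magic-string is installed; the file names are hypothetical and each file again gets an IIFE wrapper:
// npm install magic-string
var fs = require('fs');
var MagicString = require('magic-string');

var bundle = new MagicString.Bundle();
['a.js', 'b.js'].forEach(function (file) {               // hypothetical input files
    var s = new MagicString(fs.readFileSync(file, 'utf8'));
    s.prepend('(function () {\n').append('\n}());');     // per-file IIFE wrapper
    bundle.addSource({ filename: file, content: s });
});

fs.writeFileSync('bundle.js', bundle.toString());
fs.writeFileSync('bundle.js.map', bundle.generateMap({ includeContent: true, hires: true }).toString());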

AS3 (string).split quotation mark only

I want to split up a document by its quotation marks. I see (here) that they were able to work around this by adding a '\' before the quotation mark, but my document has hundreds of these strings I'm trying to cut text out of, so changing that manually would be a real pain and time sink.
Here's an example of the string I'm trying to cut from:
D
And here's an example of my current code:
private function onShopTextLoaded(e:Event):void
{
    shopArrayOfWebsites = e.target.data.split(/\n/);
    for (var i:String in shopArrayOfWebsites)
    {
        trace("shopArrayOriginal: " + shopArrayOfWebsites[i]);
        var arrayString:String = shopArrayOfWebsites[i].split('"' /* << that won't work */ );
        trace(arrayString[1]);
        //shopArrayOfWebsites[i] = arrayString[1];
    }
}
private function postInShopView():void
{
    var iLevel:Number = 1;
    var iSection:Number = 1;
    var iShop:Number = 0;
    for (var i:String in shopArrayOfWebsites)
    {
        iShop++;
        if (iShop >= 5)
        {
            iSection++;
            iShop = 0;
        }
        if (iSection >= 5)
        {
            iLevel++;
            iSection = 1;
        }
        var shopStringEquiv:String = "L" + iLevel.toString() + "S" + iSection.toString() + "Shop" + iShop.toString();
        if (global.shopTarget == shopStringEquiv)
        {
            var result:uint = ExternalInterface.call("showShopFrame", shopArrayOfWebsites[i]);
        }
        //trace(shopStringEquiv);
        //trace(shopArrayOfWebsites[i]);
    }
}
I get an error of:
ReferenceError: Error #1069: Property 1 not found on String and there is no default value.
So from here I'm not quite sure how I'm able to split up this document. Any ideas? Thanks!
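For illustration only (JavaScript rather than AS3, though the split semantics are the same): split('"') needs no escaping inside single quotes, and it returns an array, so the error above most likely comes from assigning that array to a String-typed variable and then indexing it. A rough sketch of the idea:
var line = 'some prefix "https://example.com/shop" some suffix';  // hypothetical line from the document
var parts = line.split('"');        // ['some prefix ', 'https://example.com/shop', ' some suffix']
var quoted = parts.length > 1 ? parts[1] : null;  // the text between the first pair of quotes
console.log(quoted);                // -> https://example.com/shop
In AS3 the equivalent change would be to declare the split result as an Array (or leave it untyped) instead of a String.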

Drupal removing a node reference from a node

OK, I'm trying to write a script, both PHP and JavaScript, that moves a particular content-type NODE from one reference to another. This is the structure:
I have a PROJECT
Inside each PROJECT are PAGES
Inside each PAGE are CALLOUTS
and inside each CALLOUT are PRODUCTS.
What I want to do is move a PRODUCT from one CALLOUT to another. I am able to merge these, but now I want to delete the first instance. An example:
I have PRODUCT AAG-794200 that is on PAGE 6 CALLOUT A. I am merging that PRODUCT with PAGE 6 CALLOUT B.
I can get the product to merge, but now I need to remove it from CALLOUT A. Here is my code:
$merge = explode(',', $merge); //Merge SKUs
$mpages = explode(',', $mpages); //Merge Pages
$mcallouts = explode(',', $mcallouts); //Merge Callouts
$mcallout_nid = explode(',', $mcallout_nid); //Merge Current callout
$length = count($merge);
$e = 0;
while ($e < $length) {
    //Where is the SKU going to?
    $to_callout_letter = strtoupper($mcallouts[$e]);
    $to_page_num = $mpages[$e];
    $sku = $merge[$e];
    $from_callout = $mcallout_nid[$e];
    //Where is the SKU coming from?
    $other_callout = node_load($from_callout);
    //Need page ID of current callout for project purposes
    $page_nid = $other_callout->field_page[0]['nid'];
    $page = node_load($page_nid);
    //Need the project NID
    $project_nid = $page->field_project[0]['nid'];
    //We need to get the NID of the page we are going to
    $page_nid = db_query('SELECT * FROM content_type_page WHERE field_page_order_value = "%d" and field_project_nid = "%d" ORDER BY vid DESC LIMIT 1', $to_page_num, $project_nid);
    $page_nid_res = db_fetch_array($page_nid);
    $to_page_nid = $page_nid_res['nid'];
    //We need to get the NID of the callout here
    $co_nid = db_query('SELECT * FROM content_type_callout WHERE field_identifier_value = "%s" and field_page_nid = "%d"', $to_callout_letter, $to_page_nid);
    $co_nid_res = db_fetch_array($co_nid);
    $to_callout_letter_nid = $co_nid_res['nid'];
    //Load the present callout the SKU resides on
    $f_callout = node_load($from_callout);
    $callout = node_load($to_callout_letter_nid);
    $long = count($f_callout->field_skus);
    $deletecallout = array();
    foreach ($f_callout->field_skus as $skus) {
        $s = 0;
        while ($s < $long) {
            if ($skus['nid'] == $sku) {
                $callout->field_skus[] = $skus;
                $s++;
            }
            else {
                $deletecallout[] = $skus;
                $s++;
            }
        }
    }
    foreach ($other_callout->field_images as $old_image) {
        $callout->field_images[] = $old_image;
    }
    foreach ($other_callout->field_line_art as $old_image) {
        $callout->field_line_art[] = $old_image;
    }
    foreach ($other_callout->field_swatches as $old_image) {
        $callout->field_swatches[] = $old_image;
    }
    $callout->field_copy_text[0]['value'] .= $other_callout->field_copy_text[0]['value'];
    $callout->field_notes[0]['value'] .= $other_callout->field_notes[0]['value'];
    $callout->field_image_notes[0]['value'] .= $other_callout->field_image_notes[0]['value'];
    $callout->field_status[0]['value'] = 'In Process';
    node_save($callout);
This causes the PRODUCTS to MERGE, but not delete the original.
Thanks for any help. I know it's something simple, and it will be a palm-to-face moment.
I was actually able to solve this myself. @Chris: the brace ended after node_save($callout); I must have missed it when I copied and pasted. However, here is the code I ended up using:
$merge = explode(',', $merge); //Merge SKUs
$mpages = explode(',', $mpages); //Merge Pages
$mcallouts = explode(',', $mcallouts); //Merge Callouts
$mcallout_nid = explode(',', $mcallout_nid); //Merge Current callout
if ($merge[0] !== '0') {
    //Store NIDs of Old Callouts to the proper SKU
    $oc_sku = array();
    $oc_sku_e = count($merge);
    $oc_sku_ee = 0;
    while ($oc_sku_ee < $oc_sku_e) {
        $curr_sku = $merge[$oc_sku_ee];
        $curr_oldco = $mcallout_nid[$oc_sku_ee];
        $oc_sku[$curr_sku] = $curr_oldco;
        $oc_sku_ee++;
    }
    //Convert page numbers to page_nids
    $pc = count($mpages); //How many pages are we getting
    $pc_e = 0;
    while ($pc_e < $pc) {
        $nid = $mpages[$pc_e];
        $sql = db_query('SELECT * FROM content_type_page WHERE field_page_order_value = "%d" AND field_project_nid = "%d" ORDER BY vid DESC LIMIT 1', $nid, $project_nid);
        $res = db_fetch_array($sql);
        if ($res) {
            $npage_arr[] = $res['nid'];
        } else { //If there is no page, we need to create it here.
            $node = new StdClass();
            $node->type = 'page';
            $node->title = 'Page ' . $nid . ' of ' . $project->title;
            $node->field_project[0]['nid'] = $project_nid;
            $node->field_page_order[0]['value'] = $nid;
            $node = node_submit($node);
            node_save($node);
            $npage_arr[] = $node->nid;
        }
        $pc_e++;
    }
    // Convert callout letters to callout_nids
    $coc = count($mcallouts);
    $coc_e = 0;
    while ($coc_e < $coc) {
        $cnid = strtoupper($mcallouts[$coc_e]);
        $pnid = $npage_arr[$coc_e];
        $page_node = node_load($pnid);
        $sql = db_query('SELECT * FROM content_type_callout WHERE field_identifier_value = "%s" AND field_page_nid = "%d" ORDER BY vid DESC LIMIT 1', $cnid, $pnid);
        $res = db_fetch_array($sql);
        if ($res) {
            $cpage_arr[] = $res['nid'];
        } else { //If there is no callout that exists, we need to make it here.
            $callout_node = new stdClass();
            $callout_node->type = 'callout';
            $callout_node->field_page[0]['nid'] = $pnid;
            $callout_node->field_identifier[0]['value'] = $cnid;
            $callout_node->field_sequence[0]['value'] = 0;
            $callout_node->title = "Callout " . $callout . " on page " . $page_node->field_page_order[0]['value'];
            $callout_node->field_project[0]['nid'] = $project->nid;
            $callout_node->field_wholesaler[0]['value'] = $project->field_wholesaler[0]['value'];
            $callout_node->field_skus = array();
            $callout_node->status = 1;
            $callout_node->uid = 1;
            $callout_node->revision = true;
            $callout_node = node_submit($callout_node);
            node_save($callout_node);
            $cpage_arr[] = $callout_node->nid;
        }
        $coc_e++;
    }
    //Now we need to assign the skus to the appropriate callout for processing
    $coc2 = count($cpage_arr);
    $coc_e2 = 0;
    while ($coc_e2 < $coc2) {
        $co = $cpage_arr[$coc_e2];
        if ($co !== '0') {
            $sku = $merge[$coc_e2];
            $m_arr[$co][] = $sku;
        }
        $coc_e2++;
    }
    //we need a way to centrally store all NID's of SKUs to the callouts they belong to
    $oc_arr = array();
    $oc = count($mcallout_nid);
    $oc_e = 0;
    while ($oc_e < $oc) {
        $f_callout = $mcallout_nid[$oc_e];
        $former_callout = node_load($f_callout);
        foreach ($former_callout->field_skus as $key => $skus) {
            $oc_arr[] = $skus;
        }
        $oc_e++;
    }
    //Now we are processing the Pages/Callouts/SKUs to save
    $pc_e2 = 0;
    foreach ($m_arr as $key => $values) {
        $callout = node_load($key);
        foreach ($values as $value) {
            $oc = count($oc_arr);
            $oc_e = 0;
            while ($oc_e < $oc) {
                $skus = $oc_arr[$oc_e];
                if ($value == $skus['nid']) {
                    $callout->field_skus[] = $skus;
                    //$nid = $oc_sku[$value];
                    $old_callout_info[] = $oc_sku[$value];
                    $oc_e = $oc;
                }
                else {
                    $oc_e++;
                }
            }
        }
        foreach ($old_callout_info as $nid) {
            /* $nid = $oc_sku[$value]; */
            $former_callout = node_load($nid);
            foreach ($former_callout->field_images as $old_image) {
                $callout->field_images[] = $old_image;
            }
            foreach ($former_callout->field_line_art as $old_image) {
                $callout->field_line_art[] = $old_image;
            }
            foreach ($former_callout->field_swatches as $old_image) {
                $callout->field_swatches[] = $old_image;
            }
            $callout->field_copy_text[0]['value'] .= $former_callout->field_copy_text[0]['value'];
        }
        $callout->field_notes[0]['value'] .= $former_callout->field_notes[0]['value'];
        $callout->field_image_notes[0]['value'] .= $former_callout->field_image_notes[0]['value'];
        $callout->field_logos = $former_callout->field_logos;
        $callout->field_affiliations = $former_callout->field_affiliations;
        $callout->field_graphics = $former_callout->field_graphics;
        $callout->revision = 1;
        $callout->field_status[0]['value'] = 'inprocess';
        node_save($callout);
        $pc_e2++;
    }
}
I realize this can probably be simplified, but for now it works perfectly for what I'm trying to do. No complaints from the client so far. Thanks for taking a look, Drupal community.

Node is not launching

I'm desperately trying to get node.js working again on Ubuntu 12.04 LTS.
I installed it 2-3 weeks ago and everything went fine; I used it daily during that time.
But today it suddenly stopped working. The way it misbehaves is really strange:
node -v works and returns v0.8.2
the node command works too; I can open the REPL and do a console.log
but when I run node with a file, like node server.js, Ubuntu just drops back to the prompt:
kollektiv#kollektiv-PC:~/node-projects$ node server.js
kollektiv#kollektiv-PC:~/node-projects$
I already reinstalled Ubuntu this evening, but I get the same result.
I also ran apt-get update and apt-get upgrade several times in case some node.js dependencies were out of date.
I installed node.js by compiling it from source, following this tutorial: Compiling Node.js from source on Ubuntu 10.24 - shapeshed
I even did a chmod 777 server.js on the server file just to be sure, but that didn't change anything either.
Thanks a lot in advance for your help!
EDIT: Content of server.js
var net = require('net'),
    server = net.createServer();
var crypto = require('crypto'),
    shasum = crypto.createHash('sha256');
var alpha = [],
    i = 0,
    cle = '';
while (i < 256) {
    alpha.push(String.fromCharCode(i));
    i++;
}
// CRYPTAGE -- START --
function cryptProcess(cle, txt) {
    var k = txt.length,
        j = k / cle.length,
        cledeBase = cle,
        txtc = '',
        i = 1;
    while (i < j) {
        cle = cle + cledeBase;
        i++;
    }
    function crypt(cleu, letr) {
        //if(alpha.indexOf(letr) == -1) return "§";
        // add the key character's index and wrap around the alphabet
        var biIndex = alpha.indexOf(letr) + alpha.indexOf(cleu),
            sumIndex = biIndex - alpha.length,
            x;
        x = sumIndex >= 0 ? alpha[sumIndex] : alpha[biIndex];
        return x;
    }
    while (k--) {
        txtc = crypt(cle[k], txt[k]) + txtc;
    }
    return txtc;
}
function decryptProcess(cle, txtc) {
    var k = txtc.length,
        j = k / cle.length,
        cledeBase = cle,
        txt = '',
        i = 1;
    while (i < j) {
        cle = cle + cledeBase;
        i++;
    }
    txt = '';
    function decrypt(cleu, letc) {
        //if(alpha.indexOf(letc) == -1) return "§";
        // invert crypt: subtract the key character's index and wrap around the alphabet
        var biIndex = alpha.indexOf(letc) - alpha.indexOf(cleu),
            x;
        x = biIndex >= 0 ? alpha[biIndex] : alpha[biIndex + alpha.length];
        return x;
    }
    while (k--) {
        txt = decrypt(cle[k], txtc[k]) + txt;
    }
    return txt;
}
// CRYPTAGE -- END --
// CRYPTAGE -- END --
server.on('connection', function (client) {
    var connecOne = 0;
    function talk(data) {
        var msg = data.toString('utf8');
        var msgEnc = cryptProcess(cle, msg);
        client.write(msgEnc);
        console.log(msg + '\nsend as\n' + msgEnc);
    }
    client.once('data', function (data) {
        function triHandShake() {
        }
    });
    client.on('data', function (data) {
        var msg = data.toString('utf8');
        if (connecOne === 0) {
            connectionOne(msg);
            connecOne++;
        }
        else if (connecOne === 1) {
            // Check for paragraph symbol
            //authentification with cookie as cle
        }
        var msgDec = decryptProcess(cle, msg);
        console.log(msg + '\nreceived as\n' + msgDec);
    });
    client.on('end', function () {
        connecOne = 0;
    });
});
You need to call server.listen to listen for connections and start the process as expected.
server.listen(8124, function () { //'listening' listener
    console.log('server bound');
});
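That also explains the symptom: with no server.listen call (and no other pending work), the event loop has nothing left to do, so the script simply runs to the end and the shell prompt comes straight back. A minimal sketch, using the port number (8124) from the answer above:
var net = require('net');

var server = net.createServer(function (client) {
    client.on('data', function (data) {
        console.log('received: ' + data.toString('utf8'));
    });
});

// Without this call the process exits immediately after the script finishes.
server.listen(8124, function () {
    console.log('server listening on 8124');
});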
