I have a small issue with Lodash's mergeWith method.
I have an object that contains nested objects and arrays. I managed to merge the arrays by adding a customizer:
function customizer(a, b) {
  if (Array.isArray(a)) {
    return a.concat(b);
  }
}
It works, but the problem is that now objects are not merged at all; the old values persist. I tried something like this:
function customizer(a, b) {
  if (Array.isArray(a)) {
    return a.concat(b);
  } else {
    return add(a, b);
  }
}
but without any success. Any tips?
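For reference, a minimal sketch of how such a customizer is normally passed to _.mergeWith (the data below are placeholders, not the real objects):
const _ = require('lodash');

function customizer(a, b) {
  if (Array.isArray(a)) {
    return a.concat(b);   // arrays: concatenate instead of index-wise merge
  }
  // anything else: return undefined so lodash falls back to its default deep merge
}

const target = { list: [1, 2], nested: { x: 1 } };
const source = { list: [3], nested: { y: 2 } };

console.log(_.mergeWith(target, source, customizer));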
I want to do the following:
When the property "mobile" is changed, check to see if X is true; if so, set a variable Y and call requestUpdate to re-render the element. The element will render something different based on variable Y.
so shouldUpdate function could be:
shouldUpdate(changedProperties) {
  if (changedProperties.has("mobile")) {
    this._showDialog = true;
    this.requestUpdate();
    await this.updateComplete;
  }
  return this.openingDialog;
}
What's the best way to do this without using the shouldUpdate function?
You can use willUpdate to compute the Y variable; willUpdate is intended for computing values needed during the update.
willUpdate(changedProperties) {
  if (changedProperties.has("mobile")) {
    this._showDialog = true;
  }
}
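For context, a minimal sketch of how this can fit into a complete element (everything apart from the mobile property and the _showDialog flag is a made-up placeholder):
import { LitElement, html } from 'lit';

class MyPanel extends LitElement {
  static properties = {
    mobile: { type: Boolean },
    _showDialog: { state: true },
  };

  willUpdate(changedProperties) {
    // compute the derived flag before render(); changes made here
    // do not trigger an extra update cycle
    if (changedProperties.has('mobile')) {
      // (the "is X true" check from the question would go here)
      this._showDialog = true;
    }
  }

  render() {
    // render something different based on the computed flag
    return this._showDialog
      ? html`<p>dialog content goes here</p>`
      : html`<slot></slot>`;
  }
}

customElements.define('my-panel', MyPanel);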
I want to create a dynamic menu bar by fetching data from two collections (supcat and cat), then combining the two to create a new array which I will access on page load for the menu, but push() is not working.
ngOnInit() {
  this.cattest();
}

cattest() {
  var x;
  this.supcatobj.fetchsupcat().subscribe((res) => {
    if (res.length != 0) {
      this.supcat = res;
      for (let x = 0; x < this.supcat.length; x++) {
        this.catobj.fetchallcat(this.supcat[x]["_id"]).subscribe((resp) => {
          this.allcat = resp;
          for (let y = 0; y < this.allcat.length; y++) {
          }
          this.testarr[x].push(this.supcat[x]["supcatname"], this.allcat);
        });
      }
    }
  });
}
Instead of nesting subscribe() calls, I would try to compose separate observables for your two collections and then use the combineLatest() operator to combine them into your desired array. It is hard to see exactly what you are aiming for, but conceptually it would be something like this:
import { combineLatest } from 'rxjs';
import { filter, map } from 'rxjs/operators';

const supcat$ = this.supcatobj.fetchsupcat().pipe(filter(cat => cat.length > 0));
const allCat$ = this.catobj.fetchallcat();
const combinedCats$ = combineLatest([supcat$, allCat$]);
const res$ = combinedCats$.pipe(map(res => {
  // do operation that involves both data sets
}));
Remember to return the new, combined array from map(). This way you only need to subscribe to the one observable, and if you expose it as a class property you can use the async pipe in your template so it unsubscribes automatically.
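For illustration, a sketch of what that could look like at the class level, reusing the service and field names from the question; the component metadata and menu shape are assumptions:
import { Component, OnInit } from '@angular/core';
import { Observable, combineLatest } from 'rxjs';
import { filter, map } from 'rxjs/operators';

@Component({
  selector: 'app-menu',
  template: `
    <!-- async pipe subscribes and unsubscribes for us -->
    <ul *ngIf="menu$ | async as menu">
      <li *ngFor="let item of menu">{{ item.supcatname }}</li>
    </ul>
  `,
})
export class MenuComponent implements OnInit {
  menu$?: Observable<any[]>;

  // the two services from the question, however they are provided
  constructor(private supcatobj: any, private catobj: any) {}

  ngOnInit() {
    const supcat$ = this.supcatobj.fetchsupcat().pipe(filter((c: any[]) => c.length > 0));
    const allCat$ = this.catobj.fetchallcat();

    this.menu$ = combineLatest([supcat$, allCat$]).pipe(
      map(([supcats, allcats]) => {
        // build whatever menu structure you need from both data sets
        return supcats;
      })
    );
  }
}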
I have 2 scripts that are almost identical, with a cascade of function calls nested in a fiber.
This one (parsing Tx in a blockchain) with three calls works perfectly:
wait.launchFiber(blockchain)

function blockchain() {           // pseudocode: iterate over blocks
  foreach block {
    parseBlock(blockIndex)
  }
}

function parseBlock(blockIndex) { // pseudocode: iterate over the block's Tx
  foreach Tx in block {
    parseTx(txHash)
  }
}

function parseTx(txHash) {
  if ( txHashInDB(txHash) ) {
    // do something
  }
}

function txHashInDB(txHash) {
  var theTx = wait.forMethod(Tx, 'findOne', {'hash': txHash});
  return (theTx) ? true : false;
}
Then I have to do something similar with the mempool. In this case I don't have blocks, only transactions, so I have only 2 calls and I get this error message:
Error: wait.for can only be called inside a fiber
wait.launchFiber(watchMempool);

function watchMempool() {
  web3.eth.filter('pending', function (error, txHash) {
    parseTx(txHash);
  });
}

function parseTx(txHash) {
  if ( txHashInDB(txHash) ) {
    // do something
  }
}

function txHashInDB(txHash) {
  var theTx = wait.forMethod(Tx, 'findOne', {'hash': txHash});
  return (theTx) ? true : false;
}
I don't understand what the problem is. Those two scripts have the same structure!
I think for array functions like map or filter you need to use the wait.parallel extensions, i.e. in your case something like:
function watchMempool() {
  wait.parallel.filter(web3.eth, parseTx);
}
(Note: I'm just assuming web3.eth is an array; if not, you should probably add a bit more context to your question, or try to boil down the problem to a more generic example).
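Another thing worth checking, purely as an assumption about your setup: in web3 0.x, web3.eth.filter registers an event callback rather than filtering an array, and that callback fires later, outside the fiber started by wait.launchFiber, which is exactly what the error message complains about. One sketch of a workaround is to start a fresh fiber per callback:
wait.launchFiber(watchMempool);

function watchMempool() {
  web3.eth.filter('pending', function (error, txHash) {
    if (error) return;
    // the callback runs outside the original fiber, so give the
    // blocking-style parseTx/txHashInDB calls a fiber of their own
    wait.launchFiber(parseTx, txHash);
  });
}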
Is there a way I can access a variable outside the closure? The closure here is a stage in the Jenkinsfile. So the snippet looks like this:
node('pool') {
  try {
    stage('init') {
      def list = []
      //some code to create the list
    }
    stage('deploy') {
      //use the list created in the above stage/closure
    }
  }
  catch (err) {
    //some mail step
  }
}
With this code, I cannot access the list that was created in the first stage/closure.
How can I make this newly created list accessible to the next stage/closure?
@tim_yates, with your suggestion this works. It was easy in the end :)
node('pool') {
  try {
    def list = [] //define the list outside of the closures
    stage('init') {
      //some code to create/push elements into the list
    }
    stage('deploy') {
      //use the list created in the above stage/closure
    }
  } catch (err) {
    //some mail step
  }
}
I know it's late, but it's worth mentioning that when you declare a variable with a type or with def (for dynamic resolution), you're creating a local-scope variable that will be available only inside the closure.
If you omit the declaration, the variable will be available to the whole script:
node('pool') {
  try {
    stage('Define') {
      list = 2
      println "The value of list is $list"
    }
    stage('Print') {
      list += 1
      echo "The value of list is $list"
    }
    1/0 // force an exception to check the value of list
  }
  catch (err) {
    echo "Final value of list is $list"
  }
}
Returns:
The value of list is 2
The value of list is 3
Final value of list is 3
I've been playing around with Couchbase Server and now just tried replicating my local db to Cloudant, but am getting conflicting results for my map/reduce function pair to build a set of unique tags with their associated projects...
// map.js
function(doc) {
  if (doc.tags) {
    for (var t in doc.tags) {
      emit(doc.tags[t], doc._id);
    }
  }
}
// reduce.js
function(key, values, rereduce) {
  if (!rereduce) {
    var res = [];
    for (var v in values) {
      res.push(values[v]);
    }
    return res;
  } else {
    return values.length;
  }
}
In Couchbase Server this returns JSON like:
{"rows":[
{"key":"3d","value":["project1","project3","project8","project10"]},
{"key":"agents","value":["project2"]},
{"key":"fabrication","value":["project3","project5"]}
]}
That's exactly what I wanted & expected. However, the same query on the Cloudant replica returns this:
{"rows":[
{"key":"3d","value":4},
{"key":"agents","value":1},
{"key":"fabrication","value":2}
]}
So it somehow only returns the length of the value array... Highly confusing; I'm grateful for any insights from some M&R ninjas... ;)
It looks like this is exactly the behavior you would expect given your reduce function. The key part is this:
else {
  return values.length;
}
In Cloudant, rereduce is always called (since the reduce needs to span multiple shards). In this case, the rereduce branch returns values.length, which is just the length of the array.
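If you do want the id arrays back from Cloudant, a sketch of a reduce that keeps returning arrays on the rereduce pass as well (assuming the map function above; the "fat list" caveat in the next answer still applies):
// reduce.js (sketch)
function(key, values, rereduce) {
  if (!rereduce) {
    // values are the doc._ids emitted by the map function
    return values;
  }
  // on rereduce, values is an array of arrays produced by earlier reduce calls
  var res = [];
  for (var i = 0; i < values.length; i++) {
    res = res.concat(values[i]);
  }
  return res;
}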
I prefer to reduce/re-reduce implicitly rather than depending on the rereduce parameter.
function(doc) { // map
  if (doc.tags) {
    for (var t in doc.tags) {
      emit(doc.tags[t], {id: doc._id, tag: doc.tags[t]});
    }
  }
}
Then reduce checks whether it is accumulating document ids from the identical tag, or whether it is just counting different tags.
function(keys, vals, rereduce) {
  var initial_tag = vals[0].tag;
  return vals.reduce(function(state, val) {
    if (initial_tag && val.tag === initial_tag) {
      // Accumulate ids which produced this tag.
      var ids = state.ids;
      if (!ids)
        ids = [ state.id ]; // Build initial list from the state's id.
      return { tag: val.tag,
               ids: ids.concat([val.id])
             };
    } else {
      var state_count = state.ids ? state.ids.length : state;
      var val_count = val.ids ? val.ids.length : val;
      return state_count + val_count;
    }
  });
}
(I didn't test this code, but you get the idea. As long as the tag value is the same, it doesn't matter whether it's a reduce or a rereduce. Once different tags start reducing together, it detects that because the tag value will change, so at that point it just starts accumulating counts.)
I have used this trick before, although IMO it's rarely worth it.
Also in your specific case, this is a dangerous reduce function. You are building a wide list to see all the docs that have a tag. CouchDB likes tall lists, not fat lists. If you want to see all the docs that have a tag, you could map them.
function(doc) {
  if (!doc.tags) return;
  for (var a = 0; a < doc.tags.length; a++) {
    emit(doc.tags[a], doc._id);
  }
}
Now you can query /db/_design/app/_view/docs_by_tag?key="3d" and you should get
{"total_rows":287,"offset":30,"rows":[
{"id":"project1","key":"3d","value":"project1"}
{"id":"project3","key":"3d","value":"project3"}
{"id":"project8","key":"3d","value":"project8"}
{"id":"project10","key":"3d","value":"project10"}
]}