Convert a few fields of a nested JSON to a dictionary in PySpark - apache-spark

I have a huge nested JSON, as below:
{
"evaluation_parameters": {},
"meta": {
"active_batch_definition": {
"batch_identifiers": {
"pipeline_stage": "prod",
"run_id": "run_20220224"
},
"data_asset_name": "STORES_DQ_SUITE",
"data_connector_name": "stores_connector",
"datasource_name": "stores"
},
"batch_markers": {
"ge_load_time": "20220224T054318.272571Z"
},
"batch_spec": {
"batch_data": "SparkDataFrame",
"data_asset_name": "STORES_DQ_SUITE"
},
"expectation_suite_name": "STORES_DQ_SUITE",
"great_expectations_version": "0.14.7",
"run_id": {
"run_name": "stores_template_20220224-054316",
"run_time": "2022-02-24T05:43:16.678220+00:00"
},
"validation_time": "20220224T054318.389119Z"
},
"results": [
{
"exception_info": {
"exception_message": null,
"exception_traceback": null,
"raised_exception": false
},
"expectation_config": {
"expectation_type": "expect_column_to_exist",
"kwargs": {
"batch_id": "46f2769bf8c7729a40efddfa0597de22",
"column": "country"
},
"meta": {}
},
"meta": {},
"result": {},
"success": true
},
{
"exception_info": {
"exception_message": null,
"exception_traceback": null,
"raised_exception": false
},
"expectation_config": {
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {
"batch_id": "46f2769bf8c7729a40efddfa0597de22",
"column": "country"
},
"meta": {}
},
"meta": {},
"result": {
"element_count": 102,
"partial_unexpected_counts": [],
"partial_unexpected_index_list": null,
"partial_unexpected_list": [],
"unexpected_count": 0,
"unexpected_percent": 0.0
},
"success": true
},
{
"exception_info": {
"exception_message": null,
"exception_traceback": null,
"raised_exception": false
},
"expectation_config": {
"expectation_type": "expect_column_values_to_be_of_type",
"kwargs": {
"batch_id": "46f2769bf8c7729a40efddfa0597de22",
"column": "country",
"type_": "StringType"
},
"meta": {}
},
"meta": {},
"result": {
"observed_value": "StringType"
},
"success": true
},
{
"exception_info": {
"exception_message": null,
"exception_traceback": null,
"raised_exception": false
},
"expectation_config": {
"expectation_type": "expect_column_to_exist",
"kwargs": {
"batch_id": "46f2769bf8c7729a40efddfa0597de22",
"column": "countray"
},
"meta": {}
},
"meta": {},
"result": {},
"success": false
},
{
"exception_info": {
"exception_message": null,
"exception_traceback": null,
"raised_exception": false
},
"expectation_config": {
"expectation_type": "expect_table_row_count_to_equal",
"kwargs": {
"batch_id": "46f2769bf8c7729a40efddfa0597de22",
"value": 10
},
"meta": {}
},
"meta": {},
"result": {
"observed_value": 102
},
"success": false
},
{
"exception_info": {
"exception_message": null,
"exception_traceback": null,
"raised_exception": false
},
"expectation_config": {
"expectation_type": "expect_column_sum_to_be_between",
"kwargs": {
"batch_id": "46f2769bf8c7729a40efddfa0597de22",
"column": "active_stores",
"max_value": 1000,
"min_value": 100
},
"meta": {}
},
"meta": {},
"result": {
"observed_value": 22075.0
},
"success": false
}
],
"statistics": {
"evaluated_expectations": 6,
"success_percent": 50.0,
"successful_expectations": 3,
"unsuccessful_expectations": 3
},
"success": false
}
I want to derive a table with values with the below lineage:
data_source : hardcoded value
run_time : meta.run_id.run_time
expectation_type : results.expectation_config.expectation_type
expectations : results.expectation_config.kwargs (all values except batch_id in a dictionary)
results : results.result (everything as a dictionary)
Expected Result
+-------------------+--------------------------------+------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------+
|data_source |run_time |expectation_type |expectations |results |success |
+-------------------+--------------------------------+------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------+
|hardcoded_value |2022-02-24T05:43:16.678220+00:00|expect_column_to_exist |{"column": "country"} |{} |true |
|hardcoded_value |2022-02-24T05:43:16.678220+00:00|expect_column_values_to_not_be_null |{"column": "country"} |{"element_count": 102, "partial_unexpected_counts": [], "partial_unexpected_index_list": null, "partial_unexpected_list": [], "unexpected_count": 0, "unexpected_percent": 0.0} |true |
|hardcoded_value |2022-02-24T05:43:16.678220+00:00|expect_column_values_to_be_of_type |{"column": "country","type_": "StringType"} |{"observed_value": "StringType"} |true |
|hardcoded_value |2022-02-24T05:43:16.678220+00:00|expect_column_to_exist |{"column": "countray"} |{} |false |
|hardcoded_value |2022-02-24T05:43:16.678220+00:00|expect_table_row_count_to_equal |{"value": 10} |{"observed_value": 102} |false |
|hardcoded_value |2022-02-24T05:43:16.678220+00:00|expect_column_sum_to_be_between |{"column": "active_stores","max_value": 1000,"min_value": 100} |{"observed_value": 22075.0} |false |
+-------------------+--------------------------------+------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------+
Can someone please help me with this?
Thank you in advance.

Convert the JSON into a DataFrame using the spark.read.json function; this gives you a DataFrame with the parent keys as separate columns.
Then explode the results column using the explode function from pyspark.sql.functions, and select the fields you need from the exploded struct.
from pyspark.sql.functions import explode
df = spark.read.json(json_path, multiLine=True)
df = df.withColumn("run_time", df.meta.run_id.run_time)
df = df.withColumn("exploded_results", explode(df.results))
df = df.select(df.run_time, df.exploded_results.expectation_config.expectation_type, df.exploded_results.expectation_config.kwargs, df.exploded_results.result, df.exploded_results.success)
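To get all the way to the expected output (a hardcoded data_source, kwargs without batch_id, and the result struct rendered as a dictionary string), the select can be extended. A standalone sketch, assuming Spark 3.1+ for Column.dropFields (to_json omits null struct fields by default, so sparse kwargs render cleanly):

from pyspark.sql.functions import col, explode, lit, to_json

df = spark.read.json(json_path, multiLine=True)
df = df.withColumn("r", explode(df.results))
out = df.select(
    lit("hardcoded_value").alias("data_source"),
    col("meta.run_id.run_time").alias("run_time"),
    col("r.expectation_config.expectation_type").alias("expectation_type"),
    # drop batch_id from the kwargs struct, then render the rest as a JSON dict
    to_json(col("r.expectation_config.kwargs").dropFields("batch_id")).alias("expectations"),
    to_json(col("r.result")).alias("results"),
    col("r.success").alias("success"),
)
out.show(truncate=False)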

Related

Opensearch transform splitting array values to new events

I'm transforming an index that contains the following event, but the values inside the array are being split into new events, e.g.:
"serviceIdentifiers": "Redis"
"serviceIdentifiers": "Event_Detector Service"
etc.
{
"_index": "collated_txn_health_2022.05",
"_type": "_doc",
"_id": "LAUpboIBh6CUatILrsN3",
"_score": 1,
"_source": {
"timeInGMT": 0,
"kpiId": 0,
"compInstanceIdentifier": "d0352b7d-0484-4714-bbc8-eb67cbb7be70",
"agentIdentifier": "ComponentAgent-171",
"kpiIdentifier": "PACKETS_DROPPED",
"categoryIdentifier": "Network Utilization",
"applicationIdentifier": null,
"serviceIdentifiers": [
"Supervisor_Controller Service",
"Event_Detector Service",
"UI_Service",
"Redis",
"CC_Service"
],
"clusterIdentifiers": [
"a5c57ef5-4018-41b8-b727-27c8f8376c0e"
],
"collectionInterval": 60,
"value": "0.0",
"kpiType": "Core",
"groupAttribute": "ALL",
"groupIdentifier": null,
"watcherValue": null,
"errorCode": null,
"clusterOperation": null,
"aggLevelInMins": 1,
"error": false,
"kpiGroup": false,
"discovery": false,
"maintenanceExcluded": false,
"#timestamp": "2022-05-01T01:32:00.000Z"
}
}
Following is the transform job configuration.
curl -u admin:admin -XPUT "http://XXX.XXX.XX.XXX:9201/_plugins/_transform/my-array-job-2" -H 'Content-type: application/json' -d'
{
"transform": {
"schedule": {
"interval": {
"start_time": 1659705000000,
"period": 1,
"unit": "Minutes"
}
},
"metadata_id": null,
"updated_at": 1659456180000,
"enabled": true,
"enabled_at": 1659457620000,
"description": "",
"source_index": "collated_txn_health_2022.05",
"data_selection_query": {
"match_all": {
"boost": 1
}
},
"target_index": "transform_collated_txn_health_2022.05",
"page_size": 1000,
"groups": [
{
"date_histogram": {
"fixed_interval": "1m",
"source_field": "#timestamp",
"target_field": "#timestamp",
"timezone": "Asia/Calcutta"
}
},
{
"terms": {
"source_field": "clusterIdentifiers",
"target_field": "clusterIdentifiers"
}
},
{
"terms": {
"source_field": "serviceIdentifiers",
"target_field": "serviceIdentifiers"
}
},
{
"terms": {
"source_field": "compInstanceIdentifier",
"target_field": "compInstanceIdentifier"
}
},
{
"terms": {
"source_field": "agentIdentifier",
"target_field": "agentIdentifier"
}
}
],
"aggregations": {
"count_#timestamp": {
"value_count": {
"field": "#timestamp"
}
}
}
}
}'
Following are the events from the transform index.
{
"_index": "transform_heal_collated_txn_health_2022.05",
"_type": "_doc",
"_id": "ybK0McQ9NZrt9xdo9iWKbA",
"_score": 1,
"_source": {
"transform._id": "my-array-job-2",
"transform._doc_count": 2,
"#timestamp": 1651365120000,
"clusterIdentifiers": "a5c57ef5-4018-41b8-b727-27c8f8376c0e",
"serviceIdentifiers": "Redis",
"compInstanceIdentifier": "a5c57ef5-4018-41b8-b727-27c8f8376c0e",
"agentIdentifier": "ComponentAgent-170",
"count_#timestamp": 2
}
},
{
"_index": "transform_heal_collated_txn_health_2022.05",
"_type": "_doc",
"_id": "Wf-4KwnFaYuw9bL-V-9WEQ",
"_score": 1,
"_source": {
"transform._id": "my-array-job-2",
"transform._doc_count": 2,
"#timestamp": 1651365120000,
"clusterIdentifiers": "a5c57ef5-4018-41b8-b727-27c8f8376c0e",
"serviceIdentifiers": "Redis_Server Service",
"compInstanceIdentifier": "a5c57ef5-4018-41b8-b727-27c8f8376c0e",
"agentIdentifier": "ComponentAgent-170",
"count_#timestamp": 2
}
}
It would be a great help if somebody could suggest a solution for the array fields.
I have solved the issue with the following Painless script, which helps transform array fields in OpenSearch. (A terms grouping on an array field emits one output document per value, which is what was splitting the events; collecting the values in a scripted_metric aggregation keeps the array intact.)
PUT _plugins/_transform/my-array-job-2
{
"transform": {
"schedule": {
"interval": {
"start_time": 1659705000000,
"period": 1,
"unit": "Minutes"
}
},
"metadata_id": null,
"updated_at": 1659456180000,
"enabled": true,
"enabled_at": 1659457620000,
"description": "",
"source_index": "heal_collated_txn_heal_health_2022.05_reindex",
"target_index": "transform_heal_collated_txn_heal_health_2022.05",
"page_size": 1000,
"groups": [
{
"date_histogram": {
"fixed_interval": "1m",
"source_field": "#timestamp",
"target_field": "#timestamp",
"timezone": "Asia/Calcutta"
}
},
{
"terms": {
"source_field": "kpiIdentifier",
"target_field": "kpiIdentifier"
}
},
{
"terms": {
"source_field": "clusterIdentifiers",
"target_field": "clusterIdentifiers"
}
}
],
"aggregations": {
"count_#timestamp": {
"value_count": {
"field": "#timestamp"
}
},
"count_agentIdentifier": {
"value_count": {
"field": "agentIdentifier"
}
},
"sum_value": {
"sum": {
"field": "value"
}
},
"max_value": {
"max": {
"field": "value"
}
},
"avg_value": {
"avg": {
"field": "value"
}
},
"count_value": {
"value_count": {
"field": "value"
}
},
"percentiles_value": {
"percentiles": {
"field": "value",
"percents": [
95
],
"keyed": true,
"tdigest": {
"compression": 100
}
}
},
"serviceIdentifiers": {
"scripted_metric": {
"init_script": "state.docs = []",
"map_script": """
Map span = [
'url':doc['serviceIdentifiers']
];
state.docs.add(span)
""",
"combine_script": "return state.docs;",
"reduce_script": """
def all_docs = [];
for (s in states) {
for (span in s) {
all_docs.add(span);
}
}
def size = all_docs.size();
def serviceIdentifiers_1 = all_docs[0]['url'];
def ret = new HashMap();
ret['serviceIdentifiers'] = serviceIdentifiers_1;
return ret;
"""
}
}
}
}
}
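As a sanity check, the transformed output can be inspected before enabling the job; a sketch, assuming the _preview endpoint of the OpenSearch transform API is available in your version (check your version's docs), with a trimmed-down transform body:

POST _plugins/_transform/_preview
{
  "transform": {
    "source_index": "heal_collated_txn_heal_health_2022.05_reindex",
    "target_index": "transform_preview",
    "page_size": 10,
    "groups": [
      {
        "date_histogram": {
          "fixed_interval": "1m",
          "source_field": "#timestamp",
          "target_field": "#timestamp",
          "timezone": "Asia/Calcutta"
        }
      }
    ],
    "aggregations": {
      "count_#timestamp": {
        "value_count": { "field": "#timestamp" }
      }
    }
  }
}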

Creating Custom Account for Stripe

I tried creating a Stripe Custom account in test mode.
Request
const createAccountTeacher = catchAsync(async (req, res) =>
{
const date = new Date();
let time = Math.floor(date.getTime() / 1000);
const email = req.body.email;
const country = req.body.country;
const createAccount = await stripe.accounts.create({
type: 'custom',
country: country,
business_type: 'individual',
tos_acceptance: {
date: time,
ip: '8.8.8.8',
},
business_profile: {
mcc: '5734',
url: 'http://google.com',
product_description: 'Teaching Courses available',
support_phone: '+10000000000',
},
individual: {
first_name: 'ABC',
last_name: 'XYZ',
dob: {
day: 24,
month: 6,
year: 1992,
},
address: {
line1: '1996 W Highland Ave',
postal_code: 90002,
city: ' San Bernardino',
state: 'California',
},
email: email,
phone: '+1202-555-0454',
ssn_last_4: 9999,
id_number: 123459999,
},
external_account: {
object: 'bank_account',
country: 'US',
currency: 'usd',
account_number: '000123456789',
routing_number: 121000358,
},
capabilities: {
card_payments: { requested: true },
transfers: { requested: true },
},
});
if (createAccount) {
res.status(200).json({ data: createAccount });
}
});
Here is the response:
{
"data":
{
"details_submitted": true,
"type": "custom",
"metadata": {},
"id": "acct_1LNcOTD4Ev4rC234",
"object": "account",
"default_currency": "usd",
"capabilities": {
"transfers": "pending",
"card_payments": "pending"
},
"business_type": "individual",
"individual": {
"metadata": {},
"id": "person_1LNcOVD4Ev4rC234OQjvuHiP",
"object": "person",
"account": "acct_1LNcOTD4Ev4rC234",
"dob": {
"year": 1992,
"day": 24,
"month": 6
},
"requirements": {
"currently_due": [],
"past_due": [],
"eventually_due": [],
"pending_verification": [
"id_number",
"verification.document"
],
"errors": [],
"alternatives": []
},
"ssn_last_4_provided": true,
"phone": "+12025550454",
"relationship": {
"percent_ownership": null,
"title": null,
"owner": false,
"director": false,
"representative": true,
"executive": false
},
"future_requirements": {
"currently_due": [],
"past_due": [],
"eventually_due": [],
"pending_verification": [],
"errors": [],
"alternatives": []
},
"verification": {
"status": "pending",
"details": null,
"document": {
"details_code": null,
"front": null,
"details": null,
"back": null
},
"additional_document": {
"details_code": null,
"front": null,
"details": null,
"back": null
},
"details_code": null
},
"address": {
"line2": null,
"line1": "1996 W Highland Ave",
"state": "California",
"postal_code": "90002",
"city": " San Bernardino",
"country": "US"
},
"email": "lilypota#ema-sofia.eu",
"created": 1658321573,
"first_name": "ABC",
"id_number_provided": true,
"last_name": "XYZ"
},
"charges_enabled": false,
"settings": {
"dashboard": {
"display_name": "Google",
"timezone": "Etc/UTC"
},
"payouts": {
"debit_negative_balances": false,
"statement_descriptor": null,
"schedule": {
"interval": "daily",
"delay_days": 2
}
},
"card_issuing": {
"tos_acceptance": {
"ip": null,
"date": null
}
},
"bacs_debit_payments": {},
"payments": {
"statement_descriptor_kanji": null,
"statement_descriptor_kana": null,
"statement_descriptor": "GOOGLE.COM"
},
"sepa_debit_payments": {},
"card_payments": {
"statement_descriptor_prefix_kanji": null,
"statement_descriptor_prefix": null,
"statement_descriptor_prefix_kana": null,
"decline_on": {
"avs_failure": false,
"cvc_failure": false
}
},
"branding": {
"icon": null,
"secondary_color": null,
"logo": null,
"primary_color": null
}
},
"tos_acceptance": {
"ip": "8.8.8.8",
"user_agent": null,
"date": 1658321567
},
"requirements": {
"current_deadline": null,
"past_due": [],
"errors": [],
"disabled_reason": "requirements.pending_verification",
"pending_verification": [
"individual.id_number",
"individual.verification.document"
],
"currently_due": [],
"eventually_due": [],
"alternatives": []
},
"payouts_enabled": false,
"company": {
"tax_id_provided": false,
"phone": "+12025550454",
"owners_provided": true,
"verification": {
"document": {
"details_code": null,
"front": null,
"details": null,
"back": null
}
},
"address": {
"line2": null,
"line1": "1996 W Highland Ave",
"state": "California",
"postal_code": "90002",
"city": " San Bernardino",
"country": "US"
},
"executives_provided": true,
"directors_provided": true,
"name": null
},
"external_accounts": {
"has_more": false,
"total_count": 1,
"object": "list",
"url": "/v1/accounts/acct_1LNcOTD4Ev4rC234/external_accounts",
"data": [
{
"last4": "6789",
"account_holder_name": null,
"metadata": {},
"id": "ba_1LNcOUD4Ev4rC234XwzzfiqR",
"object": "bank_account",
"account_type": null,
"default_for_currency": true,
"account_holder_type": null,
"account": "acct_1LNcOTD4Ev4rC234",
"status": "new",
"available_payout_methods": [
"standard"
],
"bank_name": "BANK OF AMERICA, N.A.",
"currency": "usd",
"country": "US",
"routing_number": "121000358",
"fingerprint": "gqPBt6FUMZJkqc9q"
}
]
},
"future_requirements": {
"current_deadline": null,
"past_due": [],
"errors": [],
"disabled_reason": null,
"pending_verification": [],
"currently_due": [],
"eventually_due": [],
"alternatives": []
},
"country": "US",
"email": null,
"created": 1658321576,
"business_profile": {
"support_email": null,
"product_description": "Teaching Courses available",
"mcc": "5734",
"support_url": null,
"support_address": null,
"url": "http://google.com",
"support_phone": "+10000000000",
"name": null
}
}
}
The Custom account is created, but the problem is that it is restricted because of the identity document.
I am trying to upload the document like this:
const updateAccount = catchAsync(async (req, res) => {
// let imagepath = ${req.protocol}://${req.get('host')}/uploads/${req.file.filename};
if(req.file.path){
const file = await stripe.files.create({
purpose: 'identity_document',
file: {
data: fs.readFileSync(req.file.path),
name: req.file.filename,
type: 'application/octet-stream',
},
}, {
stripeAccount: 'acct_1LNcOTD4Ev4rC234',
});
if(file){
res.status(200).json({data:file})
}
}
})
Still the custom account is restricted.
I would appreciate a little help.
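One step that looks missing (an assumption based on Stripe's verification flow, not something confirmed in the question): stripe.files.create only uploads the file; it still has to be attached to the individual as their verification document, for example via an account update:

// Hypothetical follow-up: link the uploaded file to the individual's
// verification document so Stripe can run the identity check.
const updated = await stripe.accounts.update('acct_1LNcOTD4Ev4rC234', {
  individual: {
    verification: {
      document: { front: file.id },
    },
  },
});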

Filter nested result inside a nested object with elasticsearch

I'm trying to filter a nested object and sort by the result. I tried some things without success; I'll leave my initial attempt below. It partially works: it filters according to what is in my search variable, but all entries of the nested object come back, since it is inside the 'root', which is another nested object.
Elastic version: 7.13.0 with NodeJS, using the official @elastic/elasticsearch package from npm.
let params: RequestParams.Search = {
index: index,
body: {
size: 30,
query: {
bool: {
must: [
{
nested: {
path: "profile",
query: {
bool: {
must: [
{
match: {
"profile.id": profileId,
},
},
],
},
},
},
},
],
filter: [
{
nested: {
path: "profile.following",
ignore_unmapped: true,
query: {
query_string: {
fields: [
"profile.following.name",
"profile.following.username",
],
query: searchWord + "*",
},
},
},
},
],
},
},
},
};
I need it to match the specific 'profile.id' passed as a parameter to the function, so the result is only one profile with the N people it follows.
The document is mapped as follows (I left only the fields relevant to the question):
{
"mappings": {
"_doc": {
"properties": {
"id": {
"type": "integer"
},
"phone": {
"type": "text"
},
"profile": {
"type": "nested",
"properties": {
"id": {
"type": "integer"
},
"username": {
"type": "text"
},
"following": {
"type": "nested",
"properties": {
"id": {
"type": "integer"
},
"isAwaitingApproval": {
"type": "boolean"
},
"name": {
"type": "text"
},
"profilePicURL": {
"type": "text"
},
"username": {
"type": "text"
}
}
}
}
}
}
}
}
}
An example of a current result, with the following parameters (profileId: 141, searchWord: "para"), is:
{
"res": [
{
"profilePicURL": "localimage",
"name": "donor donor",
"id": 140,
"username": "victorTesteElastic2",
"isAwaitingApproval": false
},
{
"profilePicURL": "localimage",
"name": "para ser seguido",
"id": 142,
"username": "victorprivate",
"isAwaitingApproval": true
}
]
}
the desired result is:
{
"res": [
{
"profilePicURL": "localimage",
"name": "para ser seguido",
"id": 142,
"username": "victorprivate",
"isAwaitingApproval": true
}
]
}
With some more research I got what I needed; I'll leave the answer here in case anyone needs it too.
let params: RequestParams.Search = {
index: index,
body: {
size: 30,
query: {
bool: {
must: [
{
nested: {
path: "profile",
query: {
bool: {
must: [
{
match: {
"profile.id": profileId,
},
},
],
},
},
},
},
{
nested: {
path: "profile",
inner_hits: {
name: "profile",
},
query: {
nested: {
path: "profile.following",
inner_hits: {
name: "following",
},
ignore_unmapped: true,
query: {
query_string: {
fields: [
"profile.following.name",
"profile.following.username",
],
query: searchWord + "*",
},
},
},
},
},
},
],
},
},
},
};
I basically moved into must what was in filter, mapped the outer nested object (in this case profile), and added an inner_hits tag for profile and another for following; that's the only way it worked.
The answer I needed was returned here:
body.hits.hits[0].inner_hits.profile.hits.hits[0].inner_hits.following.hits.hits
below is an example of the answer:
{
"res": [
{
"_index": "donor",
"_type": "_doc",
"_id": "P3VWNnsB4coAEhD-F3fF",
"_nested": {
"field": "profile",
"offset": 0,
"_nested": {
"field": "following",
"offset": 0
}
},
"_score": 1,
"_source": {
"profilePicURL": "localimage",
"name": "donor donor",
"id": 140,
"username": "victorTesteElastic2",
"isAwaitingApproval": false
}
},
{
"_index": "donor",
"_type": "_doc",
"_id": "P3VWNnsB4coAEhD-F3fF",
"_nested": {
"field": "profile",
"offset": 0,
"_nested": {
"field": "following",
"offset": 1
}
},
"_score": 1,
"_source": {
"profilePicURL": "localimage",
"name": "para ser seguido",
"id": 142,
"username": "victorprivate",
"isAwaitingApproval": true
}
}
]
}
The filtered data I need, matched in must, is in this array; I iterate over it and read _source, which holds the indexed data.
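For reference, a small sketch of that iteration (assuming the response shape above and that body comes from const { body } = await client.search(params); hit is typed loosely here):

// Collect only the matched "following" entries from the nested inner_hits.
const followingHits =
  body.hits.hits[0].inner_hits.profile.hits.hits[0].inner_hits.following.hits.hits;
const res = followingHits.map((hit: any) => hit._source);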

In Apollo Client pagination configuration with a "merge" function, existing cached data is always empty even after calling fetchMore

I'm new to Apollo Client and I'm trying to implement pagination for my product list, but I can't figure out why the existing parameter in the merge function is always empty. The incoming parameter updates with a new list each time I call fetchMore, but existing is always empty, so I can't merge the new list with the old one.
This is my client configuration:
/* eslint-disable @typescript-eslint/no-unsafe-return */
import { ApolloClient, InMemoryCache } from '@apollo/client'
import { AppEndpoints } from './const'
import { createLink } from './links'
const cache = new InMemoryCache({
typePolicies: {
ListProductSearchType: {
fields: {
items: {
keyArgs: false,
// eslint-disable-next-line prefer-arrow/prefer-arrow-functions
merge: (existing = [], incoming, { args }) => {
console.log('>>>args', args)
console.log('>>>existing', existing) // it's always empty
console.log('>>>incoming', incoming)
return [...existing, ...incoming]
},
},
},
},
},
})
const link = createLink(AppEndpoints.main)
const client = new ApolloClient({
cache,
// ssrMode: false,
link,
defaultOptions: {
mutate: {
errorPolicy: 'ignore',
},
query: {
fetchPolicy: 'cache-first',
},
},
})
export default client
This is my GraphQL response:
{
"data": {
"productSearch": {
"listDto": {
"count": 10,
"items": [
{
"id": "1d37d4fe-79d9-440a-8869-2dca0327791b",
"code": "780133 Iceland Poppy",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 429.29,
"compareAtPrice": 240.4,
"hasDiscount": true,
"visited": 27685,
"salesCount": 8148,
"createdDateTime": "2020-12-14T06:02:38.0469339+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "ae15c925-75ef-4dde-aa07-0eeb1bbb75c8",
"code": "330338 Amaranth",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 234.8,
"compareAtPrice": 211.32,
"hasDiscount": true,
"visited": 27660,
"salesCount": 6374,
"createdDateTime": "2020-12-05T15:04:37.4237772+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "de23a1f8-5e79-4cf9-88f0-57518c42a82c",
"code": "690156 Snowflake",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 110.11,
"compareAtPrice": 88.09,
"hasDiscount": true,
"visited": 27141,
"salesCount": 2278,
"createdDateTime": "2020-10-18T11:27:38.0467775+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "fb298a9c-a3d7-4c0e-a96e-a552b98d340f",
"code": "375033 Peony",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 337.68,
"compareAtPrice": 151.96,
"hasDiscount": true,
"visited": 27050,
"salesCount": 2483,
"createdDateTime": "2020-12-06T22:57:37.4236274+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "d017638f-3062-49bf-99cc-0e06ba0882b9",
"code": "112093 Hyacinth, wild",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 460.43,
"compareAtPrice": 326.91,
"hasDiscount": true,
"visited": 26843,
"salesCount": 530,
"createdDateTime": "2020-11-10T23:13:37.4235865+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "682a3c04-a462-4cbd-be8f-8b65d024b73f",
"code": "914276 Iceland Poppy",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 126.81,
"compareAtPrice": 100.18,
"hasDiscount": true,
"visited": 24055,
"salesCount": 6328,
"createdDateTime": "2021-01-05T11:05:38.0469862+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "c48819e2-52f4-4324-9f11-616efbc1a744",
"code": "494847 Persian Candytuft",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 405.95,
"compareAtPrice": 288.22,
"hasDiscount": true,
"visited": 23713,
"salesCount": 7474,
"createdDateTime": "2020-10-23T16:24:37.4236199+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "7118ddd5-56cf-4e12-9665-accb5abf3f73",
"code": "682251 Violet",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 184.09,
"compareAtPrice": 90.2,
"hasDiscount": true,
"visited": 23448,
"salesCount": 6196,
"createdDateTime": "2020-10-12T08:36:38.0469107+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "9e69b51a-560e-4d5e-b956-d9438d996c61",
"code": "982376 Calendula",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 62.25,
"compareAtPrice": 38.6,
"hasDiscount": true,
"visited": 23300,
"salesCount": 9072,
"createdDateTime": "2020-10-10T14:24:38.0463778+00:00",
"__typename": "ProductSearchDto"
},
{
"id": "623dde57-8daf-4637-b2d3-0ebbf166aad0",
"code": "138453 Manchineel",
"isMyfavorite": false,
"currency": "$",
"imageUrl": "https://devcdn.sonbol.nl/Product/flower.jpg",
"price": 121.92,
"compareAtPrice": 56.08,
"hasDiscount": true,
"visited": 22373,
"salesCount": 4735,
"createdDateTime": "2020-10-11T12:04:37.4235489+00:00",
"__typename": "ProductSearchDto"
}
],
"__typename": "ListProductSearchType"
},
"__typename": "GenericQueryResponseProductSearchType"
}
}
}
and this is my query:
export const GetProductSearchDocument = /*#__PURE__*/ gql`
query GetProductSearch($filter: GenericFilterRequestProductSearchReqInputType!) {
productSearch(filter: $filter) {
listDto {
count
items {
id
code
isMyfavorite
currency
imageUrl
price
compareAtPrice
hasDiscount
visited
salesCount
createdDateTime
}
}
}
}
`
I'm calling fetchMore like this:
const [pageIndex, setpageIndex] = useState(0)
const { productResults, loading, fetchMore } = useQueryProductSearchData()
// eslint-disable-next-line @typescript-eslint/unbound-method
const { formatMessage } = useIntl()
useEffect(() => {
if (pageIndex !== 0) {
fetchMore({
variables: {
filter: {
pageSize: 10,
pageIndex,
dto: {
filters: [],
},
},
},
})
}
}, [fetchMore, pageIndex])
const onViewMore = () => {
setpageIndex((pre: any) => pre + 1)
}
Try adding keyFields: [], which makes the cache normalize ListProductSearchType as a single object, so each page writes into the same cache entry and merge receives the previously merged list:
typePolicies: {
ListProductSearchType: {
keyFields: [],
fields: {
items: {
keyArgs: false,
// eslint-disable-next-line prefer-arrow/prefer-arrow-functions
merge: (existing = [], incoming, { args }) => {
console.log('>>>args', args)
console.log('>>>existing', existing) // it's always empty
console.log('>>>incoming', incoming)
return [...existing, ...incoming]
},
},
},
},
},
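If pages can arrive out of order, the args parameter can also be used to splice incoming into position, along the lines of Apollo's offset-based pagination example; a sketch assuming the filter variable shape from the question (pageIndex/pageSize are taken from it):

merge: (existing = [], incoming, { args }) => {
  // Place each page at its absolute position instead of blindly appending.
  const pageIndex = args?.filter?.pageIndex ?? 0
  const pageSize = args?.filter?.pageSize ?? incoming.length
  const merged = existing.slice(0)
  for (let i = 0; i < incoming.length; i++) {
    merged[pageIndex * pageSize + i] = incoming[i]
  }
  return merged
},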
You should add a read function to your field policy:
const cache = new InMemoryCache({
typePolicies: {
ListProductSearchType: {
fields: {
items: {
//your code here
//then add this function
read(existing) {
return existing
},
},
},
},
},
})

How should one set a custom agent pool in a DevOps release definition?

I create release definitions using the DevOps REST APIs. Due to the lack of documentation, I usually capture HTTP requests and examine the JSON payload.
I'm able to set up a release using Azure agent pools. Only the relevant node follows:
"deploymentInput": {
"parallelExecution": {
"parallelExecutionType": 0
},
"agentSpecification": {
"identifier": "windows-2019"
},
"skipArtifactsDownload": false,
"artifactsDownloadInput": {},
"queueId": 749,
"demands": [],
"enableAccessToken": false,
"timeoutInMinutes": 0,
"jobCancelTimeoutInMinutes": 1,
"condition": "succeeded()",
"overrideInputs": {},
"dependencies": []
}
I want to set a custom-defined agent pool, but even when I capture the request I still can't understand how to set it. This is the full JSON of an empty release with the custom agent set:
{
"id": 0,
"name": "New release pipeline",
"source": 2,
"comment": "",
"createdOn": "2020-10-31T10:02:19.034Z",
"createdBy": null,
"modifiedBy": null,
"modifiedOn": "2020-10-31T10:02:19.034Z",
"environments": [
{
"id": -1,
"name": "Stage 1",
"rank": 1,
"variables": {},
"variableGroups": [],
"preDeployApprovals": {
"approvals": [
{
"rank": 1,
"isAutomated": true,
"isNotificationOn": false,
"id": 0
}
],
"approvalOptions": {
"executionOrder": 1
}
},
"deployStep": {
"tasks": [],
"id": 0
},
"postDeployApprovals": {
"approvals": [
{
"rank": 1,
"isAutomated": true,
"isNotificationOn": false,
"id": 0
}
],
"approvalOptions": {
"executionOrder": 2
}
},
"deployPhases": [
{
"deploymentInput": {
"parallelExecution": {
"parallelExecutionType": 0
},
"agentSpecification": null,
"skipArtifactsDownload": false,
"artifactsDownloadInput": {},
"queueId": 1039,
"demands": [],
"enableAccessToken": false,
"timeoutInMinutes": 0,
"jobCancelTimeoutInMinutes": 1,
"condition": "succeeded()",
"overrideInputs": {},
"dependencies": []
},
"rank": 1,
"phaseType": 1,
"name": "Agent job",
"refName": null,
"workflowTasks": [],
"phaseInputs": {
"phaseinput_artifactdownloadinput": {
"artifactsDownloadInput": {},
"skipArtifactsDownload": false
}
}
}
],
"runOptions": {},
"environmentOptions": {
"emailNotificationType": "OnlyOnFailure",
"emailRecipients": "release.environment.owner;release.creator",
"skipArtifactsDownload": false,
"timeoutInMinutes": 0,
"enableAccessToken": false,
"publishDeploymentStatus": true,
"badgeEnabled": false,
"autoLinkWorkItems": false,
"pullRequestDeploymentEnabled": false
},
"demands": [],
"conditions": [
{
"conditionType": 1,
"name": "ReleaseStarted",
"value": ""
}
],
"executionPolicy": {
"concurrencyCount": 1,
"queueDepthCount": 0
},
"schedules": [],
"properties": {
"LinkBoardsWorkItems": false,
"BoardsEnvironmentType": "unmapped"
},
"preDeploymentGates": {
"id": 0,
"gatesOptions": null,
"gates": []
},
"postDeploymentGates": {
"id": 0,
"gatesOptions": null,
"gates": []
},
"environmentTriggers": [],
"owner": {
"displayName": "Giacomo Stelluti Scala",
"id": "3617734a-1751-66f2-8343-c71c1398b5e6",
"isAadIdentity": true,
"isContainer": false,
"uniqueName": "giacomo.stelluti#dev4side.com",
"url": "https://dev.azure.com/dev4side/"
},
"retentionPolicy": {
"daysToKeep": 30,
"releasesToKeep": 3,
"retainBuild": true
},
"processParameters": {}
}
],
"artifacts": [],
"variables": {},
"variableGroups": [],
"triggers": [],
"lastRelease": null,
"tags": [],
"path": "\\test-poc",
"properties": {
"DefinitionCreationSource": "ReleaseNew",
"IntegrateJiraWorkItems": "false",
"IntegrateBoardsWorkItems": false
},
"releaseNameFormat": "Release-$(rev:r)",
"description": ""
}
Where is this agent set? Does anyone know how to do it properly?
Any help really appreciated.
Giacomo S. S.
I've found the solution in this question.
"deploymentInput": {
"parallelExecution": {
"parallelExecutionType": 0
},
"agentSpecification": null,
"skipArtifactsDownload": false,
"artifactsDownloadInput": {},
"queueId": 1039,
"demands": [],
"enableAccessToken": false,
"timeoutInMinutes": 0,
"jobCancelTimeoutInMinutes": 1,
"condition": "succeeded()",
"overrideInputs": {},
"dependencies": []
}
agentSpecification must be null and queueId must be set.
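For reference, queueId is the id of an agent pool queue in the target project; if it is not known, it can be looked up via the REST API (a sketch; {organization}, {project}, and the PAT are placeholders):

curl -u :{PAT} "https://dev.azure.com/{organization}/{project}/_apis/distributedtask/queues?api-version=7.1-preview.1"

Each entry in the response carries an id (the queueId to use in deploymentInput) along with the queue's pool name.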
