Python runs slow while executing the piece of code below - Linux

import requests

tag_api = "https://url...."
json_data = requests.get(tag_api).json()
length = len(json_data['tags'])
line = 0
while length >= 2:
    if line <= length - 1:
        with open('file.txt', 'a') as fp:
            fp.write(json_data['tags'][line]['name'])
        line = line + 1
When I run this code it gets slow at the while loop. Can you suggest what the issue could be?
[
    {
        "id": "",
        "category": "",
        "type": "",
        "tags": [
            { "id": "", "name": "" },
            { "id": "", "name": "" }
        ]
    },
    {
        "id": "",
        "category": "",
        "type": "",
        "tags": [
            { "id": "", "name": "" }
        ]
    },
    {
        "id": "",
        "category": "",
        "type": "",
        "tags": [
            { "id": "", "name": "" }
        ]
    },
    {
        "id": "",
        "category": "",
        "type": "",
        "tags": [
            { "id": "", "name": "" },
            { "id": "", "name": "" }
        ]
    }
]
I am trying to get the entries whose tags list has two or more items and write the tag names to a file.

Opening a file (and closing it) is a fairly slow operation. Since it's the same file every time, you should move the open out of the loop, as far out as possible.
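A minimal sketch of that suggestion, assuming the response parses into the structure shown above (the URL and file name are the question's own placeholders): open the file once, then write every tag name inside a single loop. Note also that length never changes inside the original while loop, so while length >= 2 never terminates once line passes the last index; iterating with for avoids that entirely.

import requests

tag_api = "https://url...."
json_data = requests.get(tag_api).json()

tags = json_data['tags']
if len(tags) >= 2:  # only process entries that have two or more tags
    # open the file once, outside the loop, and let the context manager close it
    with open('file.txt', 'a') as fp:
        for tag in tags:
            fp.write(tag['name'] + '\n')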


Why does this error pop up in the slash command thing?

this is the slash command:
const lib = require('lib')({token: process.env.STDLIB_SECRET_TOKEN});
await lib.discord.commands['@0.0.0'].create({
  "guild_id": "1018895233003569193",
  "name": "submit-hw",
  "description": "trimite tema",
  "options": [
    {
      "type": 3,
      "name": "materie",
      "description": "",
      "choices": [
        { "name": "romana", "value": "romana" },
        { "name": "mate", "value": "mate" },
        { "name": "religie", "value": "religie" },
        { "name": "engleza", "value": "engleza" },
        { "name": "franceza", "value": "franceza" },
        { "name": "desen", "value": "desen" },
        { "name": "sport", "value": "sport" },
        { "name": "biologie", "value": "biologie" },
        { "name": "fizica", "value": "fizica" },
        { "name": "tic", "value": "tic" },
        { "name": "istorie", "value": "istorie" },
        { "name": "chimie", "value": "chimie" },
        { "name": "consiliere", "value": "consiliere" },
        { "name": "sociala", "value": "sociala" },
        { "name": "optional", "value": "optional" },
        { "name": "tehnologica", "value": "tehnologica" },
        { "name": "geografie", "value": "geografie" },
        { "name": "muzica", "value": "muzica" }
      ],
      "required": true
    },
    {
      "type": 3,
      "name": "deadline",
      "description": "",
      "required": true
    },
    {
      "type": 3,
      "name": "tema",
      "description": "",
      "required": true
    }
  ]
});
And this is the error. It says:
Sorry, could not save your slash commands: Invalid Form Body: code 50035:
Can someone help me out? I entered all of the command options in the UI text boxes, not as code.
A description is required for all slash commands and options, including subcommands and subcommand groups; in your command, the materie, deadline and tema options all have an empty "description": "", which is what triggers the rejection. Just a heads up: error code 50035 normally goes into detail to show which part is causing the error, such as one of the option types being incorrect.

How can I get init_point or sandbox_init_point after calling payment.create API with mercadopago?

I have implemented the preference API and I am getting the response attached below. Now I am confused about how to issue a refund with this response, or whether that is even possible. Can you please suggest an API related to refunds?
Thanks in advance for your cooperation.
The response I am getting:
{
"processing_modes": [],
"metadata": {},
"binary_mode": false,
"payment_methods": {
"excluded_payment_methods": [
{
"id": ""
}
],
"excluded_payment_types": [
{
"id": ""
}
],
"installments": null,
"default_payment_method_id": null,
"default_installments": null
},
"collector_id": 472389951,
"operation_type": "regular_payment",
"items": [
{
"id": "",
"picture_url": "",
"title": "Mojito",
"description": "Cheesy 8",
"category_id": "Blah Blah",
"currency_id": "ARS",
"quantity": 11,
"unit_price": 456
}
],
"payer": {
"name": "",
"surname": "",
"email": "pappu#testuser.com",
"date_created": "",
"phone": {
"area_code": "",
"number": ""
},
"identification": {
"type": "",
"number": ""
},
"address": {
"street_name": "",
"street_number": null,
"zip_code": ""
}
},
"back_urls": {
"success": "http://eb7b02b9.ngrok.io/notification/ipn",
"pending": "http://eb7b02b9.ngrok.io/notification/ipn",
"failure": "http://eb7b02b9.ngrok.io/notification/ipn"
},
"auto_return": "",
"client_id": "3767495422255439",
"marketplace": "NONE",
"marketplace_fee": 0,
"shipments": {
"receiver_address": {
"zip_code": "",
"street_number": null,
"street_name": "",
"floor": "",
"apartment": ""
}
},
"notification_url": "http://fd7842f1.ngrok.io/notification/ipn",
"external_reference": "",
"additional_info": "",
"expires": false,
"expiration_date_from": null,
"expiration_date_to": null,
"date_created": "2019-10-03T03:35:03.228-04:00",
"id": "472389951-fdfcf414-18b7-44b3-afd5-5d3cd73fbfc2",
"init_point": "https://www.mercadopago.com.ar/checkout/v1/redirect?pref_id=472389951-fdfcf414-18b7-44b3-afd5-5d3cd73fbfc2",
"sandbox_init_point": "https://sandbox.mercadopago.com.ar/checkout/v1/redirect?pref_id=472389951-fdfcf414-18b7-44b3-afd5-5d3cd73fbfc2"
}
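The preference response above already contains the two values from the question title, so they can be read directly from the parsed response. A minimal Python sketch, assuming response_text holds the JSON shown above (in practice it would come from the SDK or the HTTP call):

import json

# response_text is assumed to hold the preference-creation response shown above
preference = json.loads(response_text)

init_point = preference["init_point"]                  # live checkout redirect URL
sandbox_init_point = preference["sandbox_init_point"]  # sandbox checkout redirect URL
preference_id = preference["id"]

print(init_point)
print(sandbox_init_point)

As for the refund part of the question: refunds are issued against an individual payment (whose id arrives through the notification_url shown above), not against the checkout preference itself.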

Unable to parse list of Json blocks in U-SQL

I have a file with a list of JSON blocks and I am stuck on processing/reading them in U-SQL and writing them to a text file.
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Blueberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
{
"id": "0002",
"type": "nut",
"name": "ake",
"ppu": 1.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Blueberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
{
"id": "0003",
"type": "test",
"name": "ake",
"ppu": 1.55,
"batters":
{
"batter":
[
]
},
"topping":
[
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
Can someone help me with this?
REFERENCE ASSEMBLY [Newtonsoft.Json];
REFERENCE ASSEMBLY [Microsoft.Analytics.Samples.Formats];
DECLARE @Full_Path string = @"C:\Users\test\Desktop\File\JsonTest.json";
USING [Microsoft.Analytics.Samples.Formats];

@RawExtract =
    EXTRACT [RawString] string
    FROM @Full_Path
    USING Extractors.Text(delimiter : '\n', quoting : false);

@ParsedJSONLines =
    SELECT JsonFunctions.JsonTuple([RawString]) AS JSONLine
    FROM @RawExtract;

@StagedData =
    SELECT JSONLine["id"] AS Id,
           JSONLine["name"] AS Name,
           JSONLine["type"] AS Type,
           JSONLine["ppu"] AS PPU,
           JSONLine["batters"] AS Batter
    FROM @ParsedJSONLines;

DECLARE @Output_Path string = @"C:\Users\Test\Desktop\File\Test2.csv";

OUTPUT @StagedData
TO @Output_Path
USING Outputters.Csv();
I am receiving an error while evaluating the expression:
Error while evaluating expression JsonFunctions.JsonTuple(RawString)
You can't use a Text extractor to extract JSON unless the file is in JSON Lines format.
Using the Text extractor will split the JSON on line breaks and you will get the error.
Use the JsonExtractor instead of the Text extractor.
https://github.com/Azure/usql/blob/master/Examples/DataFormats/Microsoft.Analytics.Samples.Formats/Json/JsonExtractor.cs
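If you do want to keep a line-based extractor, the file first has to be converted to JSON Lines, i.e. one complete JSON object per line. A minimal Python sketch of such a conversion, assuming the concatenated-object layout shown in the question (the file names are illustrative placeholders):

import json

# Read the whole file of concatenated JSON blocks (placeholder path)
with open('JsonTest.json') as src:
    text = src.read()

decoder = json.JSONDecoder()
objects = []
pos = 0
while pos < len(text):
    # skip whitespace between the concatenated blocks
    while pos < len(text) and text[pos].isspace():
        pos += 1
    if pos >= len(text):
        break
    obj, pos = decoder.raw_decode(text, pos)  # parse one block at a time
    objects.append(obj)

# Write one object per line (JSON Lines), which a '\n'-delimited extractor can handle
with open('JsonTest.jsonl', 'w') as dst:
    for obj in objects:
        dst.write(json.dumps(obj) + '\n')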

Jira Python Keyerror on issue links

I have a script which fetches info via the Jira API and then does some comparison, but I'm getting the following error:
Error :
"errorMessage": "'outwardIssue'"
"errorType": "KeyError"
JSON format:
"issuelinks": [
{
"id": "1255",
"self": "https://jiraurl/rest/api/2/issueLink/0000",
"type": {
"id": "10030",
"name": "Arises ",
"inward": "gives rise to",
"outward": "arises from",
"self": "https://jiraurl/rest/api/2/issueLinkType/0000"
},
"outwardIssue": {
"id": "1251575",
"key": "temp-511",
"self": "https://jiraurl/rest/api/2/issue/0000",
"fields": {
"summary": "a summary",
"status": {
"self": "https://jiraurl/rest/api/2/status/6",
"description": "test",
"iconUrl": "",
"name": "Closed",
"id": "6",
"statusCategory": {
"self": "https://jiraurl/rest/api/2/statuscategory/3",
"id": 3,
"key": "done",
"colorName": "green",
"name": "Done"
}
},
"issuetype": {
"self": "https://jiraurl/rest/api/2/issuetype/20",
"id": "20",
"description": "Problem ",
"iconUrl": "https://jiraurl/images/icons/issuetypes/documentation.png",
"name": "Problem",
"subtask": false
}
}
}
}
]
Python Statement where we believe the issue is:
if (problem['fields']['issuelinks'][0]['outwardIssue']) and (problem['fields']['issuelinks'][0]['type']['outward'] == "arises from"):
isitanissue = False
I can't see why it's stuck on 'outwardIssue'?
Try this:
issue = jira.issue("XX")
if (issue.raw['fields']['issuelinks'][0]['outwardIssue']) and (issue.raw['fields']['issuelinks'][0]['type']['outward'] == "arises from"):
    isitanissue = False
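The KeyError itself usually means that a given issue link simply has no 'outwardIssue' key (a link can carry 'inwardIssue' instead, depending on its direction), so indexing [0]['outwardIssue'] blindly will raise. A defensive sketch using dict.get, which also walks every link instead of only the first one (an assumption, adjust to your actual logic):

# Guard against links that have no 'outwardIssue' key at all
links = problem['fields'].get('issuelinks', [])
isitanissue = True
for link in links:
    outward_issue = link.get('outwardIssue')  # None when only 'inwardIssue' is present
    if outward_issue and link['type']['outward'] == "arises from":
        isitanissue = False
        break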

Kibana: Search within text for string

I have a log message in Kibana that contains this:
org.hibernate.exception.GenericJDBCException: Cannot open connection
at org.springframework.orm.hibernate3.HibernateTransactionManager.doBegin(HibernateTransactionManager.java:597)
Actual search that isn't returning results: log_message: "hibernate3"
If I search for "hibernate3" this message will not appear. I am using an Elasticsearch template and have indexed the field, but also want to be able to do case-insensitive full-text searching. Is this possible?
Template that is in use:
{
"template": "filebeat-*",
"mappings": {
"mainProgram": {
"properties": {
"#timestamp": {
"type": "date",
"format": "strict_date_optional_time||epoch_millis"
},
"#version": {
"type": "text"
},
"beat": {
"properties": {
"hostname": {
"type": "text"
},
"name": {
"type": "text"
}
}
},
"class_method": {
"type": "text",
"fielddata": "true",
"index": "true"
},
"class_name": {
"type": "text",
"fielddata": "true"
},
"clientip": {
"type": "ip",
"index": "not_analyzed"
},
"count": {
"type": "long"
},
"host": {
"type": "text",
"index": "not_analyzed"
},
"input_type": {
"type": "text",
"index": "not_analyzed"
},
"log_level": {
"type": "text",
"fielddata": "true",
"index": "true"
},
"log_message": {
"type": "text",
"index": "true"
},
"log_timestamp": {
"type": "text"
},
"log_ts": {
"type": "long",
"index": "not_analyzed"
},
"message": {
"type": "text"
},
"offset": {
"type": "long",
"index": "not_analyzed"
},
"query_params": {
"type": "text",
"index": "true"
},
"sessionid": {
"type": "text",
"index": "true"
},
"source": {
"type": "text",
"index": "not_analyzed"
},
"tags": {
"type": "text"
},
"thread": {
"type": "text",
"index": "true"
},
"type": {
"type": "text"
},
"user_account_combo": {
"type": "text",
"index": "true"
},
"version": {
"type": "text"
}
}
},
"access": {
"properties": {
"#timestamp": {
"type": "date",
"format": "strict_date_optional_time||epoch_millis"
},
"#version": {
"type": "text"
},
"beat": {
"properties": {
"hostname": {
"type": "text"
},
"name": {
"type": "text"
}
}
},
"clientip": {
"type": "ip",
"index": "not_analyzed"
},
"count": {
"type": "long",
"index": "not_analyzed"
},
"host": {
"type": "text",
"index": "true"
},
"input_type": {
"type": "text",
"index": "not_analyzed"
},
"log_timestamp": {
"type": "text"
},
"log_ts": {
"type": "long",
"index": "not_analyzed"
},
"message": {
"type": "text"
},
"offset": {
"type": "long",
"index": "not_analyzed"
},
"query_params": {
"type": "text",
"index": "true"
},
"response_time": {
"type": "long"
},
"sessionid": {
"type": "text",
"index": "true"
},
"source": {
"type": "text",
"index": "not_analyzed"
},
"statuscode": {
"type": "long"
},
"tags": {
"type": "text"
},
"thread": {
"type": "text",
"index": "true"
},
"type": {
"type": "text",
"index": "true"
},
"uripath": {
"type": "text",
"index": "true"
},
"user_account_combo": {
"type": "text",
"index": "true"
},
"verb": {
"type": "text",
"index": "true"
}
}
}
}
}
message: *.hibernate3.*
also works (please note that no quotes are needed for that)
According to your scenario, what you're looking for is an analyzed string field, which would first analyze the string and then index it. A quote from the docs:
In other words, index this field as full text.
So make sure that the mapping of the necessary fields is set up properly, so that you'll be able to do a full-text search on the documents.
Assuming that, in Kibana, the log line is under the field message, you could simply search for the word with:
message:"hibernate3"
You might also want to refer to this to understand the difference between term-based and full-text search.
EDIT
Have the mapping of the field log_message as such:
"log_message": {
"type": "string", <- to make it analyzed
"index": "true"
}
Also try doing a wildcard search as such:
{"wildcard":{"log_message":"*.hibernate3.*"}}
With Kibana 6.4.1 I used the "%" as wildcard.
message: %hibernate3%
For me it was because I was using ".keyword".
My field was called "message" and I had both "message" and "message.keyword" available.
Full-text search doesn't work on ".keyword".
Not working :
message.keyword : hello
Working :
message : hello
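To sanity-check the mapping outside Kibana, the same full-text search can be sent straight to Elasticsearch as a match query. A minimal Python sketch using requests; the host, index pattern and field name are assumptions based on the question and would need adjusting:

import requests

# Hypothetical local cluster; adjust host/port and index pattern as needed
url = "http://localhost:9200/filebeat-*/_search"
query = {
    "query": {
        "match": {  # analyzed full-text match on the log_message field
            "log_message": "hibernate3"
        }
    }
}

resp = requests.post(url, json=query)
for hit in resp.json()["hits"]["hits"]:
    print(hit["_source"].get("log_message"))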
