I tried to follow this tutorial to create a new subscriber in Moosend from my Django app.
# Python 2 code: the urllib2 module no longer exists in Python 3 (it was split
# into urllib.request / urllib.error).
from urllib2 import Request, urlopen
# Raw JSON payload for Moosend's "add subscriber" endpoint.
# NOTE(review): "someEmail#email.com" has '#' where '@' belongs — presumably a
# paste artifact; Moosend will reject an invalid address.
values = """
{
"Name": "Paul",
"Email": "someEmail#email.com",
"HasExternalDoubleOptIn": false,
"CustomFields": [
"Age=25",
"Country=USA"
]
}"""
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
# In Python 2 a str POST body is accepted directly; the API key and list id ride
# in the query string.
request = Request('https://api.moosend.com/v3/subscribers/7cfad3edfd9ad-07d7-4c51-810e-74e27sdsf8505c2e/subscribe.json?apikey=77f6s34dfd8914-4e3c-4d61-8435-9346f5b4adf6&New%20item=', data=values, headers=headers)
response_body = urlopen(request).read()
# Python 2 print statement — a syntax error under Python 3.
print response_body
That code works in Python 2 to create a new subscriber in a mailing list. I have made some changes so that it should work in Python 3, but it is still not inserting new subscribers into my mailing list in Moosend.
Here are the changes I made for Python 3:
from urllib.request import urlopen
# NOTE(review): two problems here. Only the bare name `urlopen` was imported
# above, so the qualified `urllib.request.urlopen` raises NameError (there is
# no `import urllib.request`). Even with that fixed, urlopen() accepts no
# `headers` keyword — headers must go on a Request object — and in Python 3
# `data` must be bytes, not str.
request2 = urllib.request.urlopen('https://api.moosend.com/v3/subscribers/7cfad3edfd9ad-07d7-4c51-810e-74e27sdsf8505c2e/subscribe.json?apikey=77f6s34dfd8914-4e3c-4d61-8435-9346f5b4adf6&New%20item=', data=values, headers=headers)
response_body = request2.read()
print(response_body)
I need help creating a new subscriber on my Moosend mailing list using Python 3.
Below is the updated code for Python 3.5 that works and manages to add a new subscriber to the Moosend mailing list:
# Python 3 port of the Moosend "add subscriber" script.
from urllib.request import urlopen
from urllib.request import Request
import urllib.parse
# Raw JSON payload for the subscribe endpoint.
# NOTE(review): "someEmail#email.com" has '#' where '@' belongs; Moosend will
# reject an invalid address, so substitute a real one.
values = """
{
"Name": "Paul",
"Email": "someEmail#email.com",
"HasExternalDoubleOptIn": false,
"CustomFields": [
"Age=25",
"Country=USA"
]
}"""
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
# Python 3's urllib requires the POST body as bytes, not str.
data = values.encode()
# BUG FIX: the original passed the un-encoded str `values` as the body, which
# makes urlopen() raise "TypeError: POST data should be bytes"; pass the
# encoded `data` produced on the line above instead.
req = Request('https://api.moosend.com/v3/subscribers/7cfad3edfd9ad-07d7-4c51-810e-74e27sdsf8505c2e/subscribe.json?apikey=77f6s34dfd8914-4e3c-4d61-8435-9346f5b4adf6&New%20item=', data=data, headers=headers)
response_body = urlopen(req).read()
print(response_body)
Related
Here's my current code:
import json
import requests
def createPage(database_id, page_id, headers, url):
    """POST a new-page payload (hard-coded title "HI THERE") to `url`.

    Prints the HTTP status code and response body; returns nothing.
    """
    page = {
        "parent": {
            "database_id": database_id,
            "page_id": page_id,
        },
        "properties": {
            "Name": {"title": {"text": "HI THERE"}},
        },
    }
    # requests.post is the shorthand for requests.request("POST", ...).
    response = requests.post(url, headers=headers, data=json.dumps(page))
    print(response.status_code)
    print(response.text)
# Identifiers of the target Notion database and page (hex UUIDs without dashes).
database_id = "ea28de8e9cca4f62b4c4da3522869d03"
page_id = "697fd88570b3420aaa928fa28d0bf230"
# NOTE(review): per the answer below, this endpoint CREATES databases; creating
# a page inside a database uses https://api.notion.com/v1/pages instead.
url = "https://api.notion.com/v1/databases/"
key = "KEY"  # placeholder for the real Notion integration token
payload = {}  # unused
headers = {
    "Authorization": f"Bearer {key}",
    "accept": "application/json",
    "Notion-Version": "2021-05-11",
    "content-type": "application/json",
}
# Fires the request at import time (no __main__ guard).
createPage(database_id, page_id, headers, url)
But every time I run this, it appears that I keep getting new databases within the page. This is before running the script:
This is after running the script:
I would like it to be like this after running the script:
How can that be achieved?
It looks as you're calling the API URL that creates a new Database, and not the one that creates a new page.
This URL: https://api.notion.com/v1/databases/ is for creating new databases, and not for creating pages.
In order to create a new page within a database, use the following URL:
https://api.notion.com/v1/pages
Where you'll need to provide the previously created database id, among other identifiers
More detailed documentation can be found here
https://developers.notion.com/reference/post-page
I'm using an AWS Lambda function to send alerts to our Slack channel. But, due to some unknown issue, I'm not getting the Slack alert, and I'm not even getting any kind of error message from the Lambda function. The logs show that the function ran successfully without any error, but I do not receive any alert.
code:
import json, sys, csv, os
import requests
def lambda_handler(event, context):
    # AWS Lambda entry point: builds a Slack attachment payload and posts it to
    # a webhook via the nested helper.
    def Send2Slack(message):
        # BUG (the subject of this question): inside a Lambda, __name__ is the
        # module name, not '__main__', so this guard is False and the entire
        # function body is silently skipped — removing the guard fixes it, as
        # the answer below confirms.
        if __name__ == '__main__':
            print('inside slack function')
            url = "webhook_URL"  # placeholder for the real Slack webhook URL
            title = (f"New Incoming Message")
            # Slack "attachments" payload with a single titled field.
            slack_data = {
                "username": "abc",
                "channel" : "xyz",
                "attachments": [
                    {
                        "color": "#ECB22E",
                        "fields": [
                            {
                                "title": title,
                                "value": message,
                                "short": "false",
                            }
                        ]
                    }
                ]
            }
            # NOTE(review): sys.getsizeof is the in-memory size of the Python
            # dict, not the byte length of the serialized JSON body.
            byte_length = str(sys.getsizeof(slack_data))
            headers = {'Content-Type': "application/json", 'Content-Length': byte_length}
            response = requests.post(url, data=json.dumps(slack_data), headers=headers)
            if response.status_code != 200:
                raise Exception(response.status_code, response.text)
    output = "Hello Slack "
    Send2Slack(output)
Please let me know where I'm going wrong and help me fix this issue.
I'm able to answer this issue.
def Send2Slack(message):
if __name__ == '__main__':
Once I removed if __name__ == '__main__': from the Send2Slack function, it worked.
Otherwise, I was not able to get into the function.
Thanks for all your help.
This code was provided to me; it gives back a lot of info about one post. I want to use it to get the same info from this URL and increase the number of posts on this page — right now it's only 20.
import requests
# https://haraj.com.sa/1179070147
def main(url):
    # Fetches a single haraj.com.sa post (id 79070147, hard-coded below) from
    # the GraphQL endpoint and prints the decoded JSON response.
    params = {
        'queryName': 'detailsPosts_singlePost',
        'token': '',
        'clientid': '812f41b2-9936-4405-aa9c-378db19b8cc4',
        'version': '8.2.9 , 10 18 - 7 - 21'
    }
    # GraphQL document: posts(id: $ids) returning the item fields plus
    # pageInfo.hasNextPage; variables carry the single post id.
    data = {
        "query": "query($ids:[Int]) { posts( id:$ids) {\n\t\titems {\n\t\t\tid status authorUsername title city postDate updateDate hasImage thumbURL authorId bodyHTML bodyTEXT city tags imagesList commentStatus commentCount upRank downRank geoHash\n\t\t}\n\t\tpageInfo {\n\t\t\thasNextPage\n\t\t}\n\t\t} }",
        "variables": {
            "ids": [
                79070147
            ]
        }
    }
    r = requests.post(url, params=params, json=data)
    print(r.json())
main('https://graphql.haraj.com.sa/')
any help is appreciated
thanks
Loop over the pages in order to obtain the desired information.
Note, you can pick up all the information from the JSON response directly without needing to call the API again.
import requests
from pprint import pp
# Present a desktop-browser User-Agent to the GraphQL endpoint.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0'
}


def main(url):
    """Page through haraj.com.sa GraphQL results for one tag, pretty-printing each post."""
    with requests.Session() as session:
        session.headers.update(headers)
        for page in range(1, 3):  # increase the number of pages from here.
            params = {
                'queryName': f'detailsPosts_tag_page{page}',
                'token': '',
                'clientid': '812f41b2-9936-4405-aa9c-378db19b8cc4',
                'version': '8.2.9 , 10 18 - 7 - 21'
            }
            # GraphQL document: posts(tag, page) plus pageInfo.hasNextPage.
            payload = {
                "query": "query($tag:String,$page:Int) { posts( tag:$tag, page:$page) {\n\t\titems {\n\t\t\tid status authorUsername title city postDate updateDate hasImage thumbURL authorId bodyHTML bodyTEXT city tags imagesList commentStatus commentCount upRank downRank geoHash geoCity geoNeighborhood\n\t\t}\n\t\tpageInfo {\n\t\t\thasNextPage\n\t\t}\n\t\t} }",
                "variables": {
                    "page": page,
                    "tag": "حراج العقار"
                }
            }
            response = session.post(url, params=params, json=payload)
            # Guard clause: stop on the first non-200 page.
            if response.status_code != 200:
                exit(f"Page# {page} is not exist, However program stopped.")
            for post in response.json()['data']['posts']['items']:
                pp(post)
                # check post.keys()


main('https://graphql.haraj.com.sa/')
I have a POST method URL, 2 headers to pass, and a big body in JSON format that I need to call through Groovy code. But I am not sure about points like how to pass the headers and the big JSON object in Groovy code for the API call. Please help me on these points. I am writing this code in Visual Studio Code.
// NOTE(review): this should be the annotation @Grab(...) — as written it is a
// call to a nonexistent Grab() method.
Grab(group='org.codehaus.groovy.modules.http-builder', module='http-builder', version='0.7.1' )
import groovyx.net.http.*
import static groovyx.net.http.ContentType.*
import static groovyx.net.http.Method.*
// Opens a plain HttpURLConnection; the http-builder imports above are unused.
def post = new URL("https://xyxz/api/testRequest/generic").openConnection();
// NOTE(review): a single-quoted Groovy string cannot span multiple lines (use
// triple quotes '''...'''), and the JSON literal is missing the closing brace
// of the outer object.
def message = '{
"test": "test",
"test1": "test1\n\t",
"test2": {
"test3": "test3",
"test4": "test4"
}'
post.setRequestMethod("POST")
post.setDoOutput(true)
post.setRequestProperty("Content-Type", "application/json")
// NOTE(review): HttpURLConnection has no setHeader() method — headers are set
// with setRequestProperty(), as done for Content-Type above.
post.setHeader("id","sadasdas1212134");
post.setHeader("id2","sdsd34sdsfdfdfdf");
post.getOutputStream().write(message.getBytes("UTF-8"));
def postRC = post.getResponseCode();
println(postRC);
if(postRC.equals(200)) {
    println(post.getInputStream().getText());
}
Straight from the ref-doc
import groovyx.net.http.HttpBuilder

// JSON payload as a Groovy map literal; HttpBuilder serializes it according
// to the configured content type.
def payload = [
    'test' : 'test',
    'test1': 'test1\n\t',
    'test2': [
        'test3': 'test3',
        'test4': 'test4'
    ]
]

// Configure the request in the HttpBuilder DSL, then issue the POST.
def result = HttpBuilder.configure {
    request.uri = 'https://xyxz/api/testRequest/generic'
    request.contentType = 'application/json'
    request.headers.id = 'sadasdas1212134'
    request.headers.id2 = 'sdsd34sdsfdfdfdf'
    request.body = payload
}.post()
println result
My code searches data from the ES database and adds some new data to the database. I used to run it in a single process, but this is too inefficient, so I want to add asyncio to my code. How can I do it? I removed the URL of my ES data for security reasons. Can anyone help me?
import json
import requests
from elasticsearch import Elasticsearch
import asyncio
class Cited:
    # Backfills citation statistics from the Semantic Scholar API into an
    # Elasticsearch 'item' index, for documents that have an S2PaperId but no
    # citation data yet.

    def __init__(self):
        # NOTE(review): the host list was blanked out for security (see the
        # question text); fill in the real ES endpoint(s).
        self.es = Elasticsearch(
            [''],
        )

    async def get_es_item(self):
        # Find up to 10000 items having extra.S2PaperId but lacking
        # extra.citations, then look each one up on Semantic Scholar.
        # NOTE(review): despite `async def`, this body contains no `await`, and
        # both es.search and search_ss (which uses requests.get) are blocking
        # calls — so gathering several of these coroutines gains no concurrency.
        query_body = {
            "from": 0,
            "size": 10000,
            "query": {
                "bool": {
                    "must":
                        {
                            "exists": {
                                "field": "extra.S2PaperId"
                            }
                        },
                    "must_not":
                        {"exists": {
                            "field": "extra.citations"
                        }}
                }
            }
        }
        items = self.es.search(index='item', body=query_body, doc_type=None, request_timeout=6000)
        items = items['hits']['hits']
        for item in items:
            item_type = item['_type']
            item_id = item['_id']
            S2PaperId = item['_source']['extra']['S2PaperId']
            self.search_ss(item_id=item_id, paperId=S2PaperId, item_type=item_type)

    def search_ss(self, item_id, paperId, item_type):
        # Fetch one paper from Semantic Scholar and write its citation stats
        # back onto the ES document; on any non-200 response, write zeros.
        headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh Intel Mac OS X 10_13_4) AppleWebKit/537.36 '
                          '(KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
        }
        req = requests.get('https://api.semanticscholar.org/v1/paper/' + paperId, headers=headers, timeout=100)
        # logging.info(req.url)
        if req.status_code == 200:
            req = json.loads(req.text)
            citations = len(req['citations'])
            citationVelocity = req['citationVelocity']
            influentialCitationCount = req['influentialCitationCount']
            self.es.update(index='item', doc_type=item_type, id=item_id,
                           body={'doc': {'extra': {'citations': citations, 'citationVelocity': citationVelocity,
                                                   'influentialCitationCount': influentialCitationCount}}},
                           request_timeout=6000)
            print(item_id, item_type, citations, citationVelocity, influentialCitationCount)
        else:
            # Runtime string is Chinese: "s2paper errored — just fill in 0".
            print('s2paper 出错了 直接补0' + item_id, item_type)
            self.es.update(index='item', doc_type=item_type, id=item_id,
                           body={'doc': {'extra': {'citations': 0, 'citationVelocity': 0,
                                                   'influentialCitationCount': 0}}},
                           request_timeout=6000)
cited = Cited()
loop = asyncio.get_event_loop()
# NOTE(review): get_es_item never awaits, so these four identical coroutines
# execute one after another on the loop — four sequential full scans rather
# than any parallel work.
loop.run_until_complete(asyncio.gather(cited.get_es_item(), cited.get_es_item(), cited.get_es_item(), cited.get_es_item()))