Getting NameError while importing a module - python-3.x

import requests
import json

token = "12882xxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"

class T_bot:
    def __init__(self):
        self.base = "https://api.telegram.org/bot{}/".format(token)
        self.sendurl = "https://api.telegram.org/bot{}/sendMessage".format(token)
        # self.data = {'chat_id': 1067109380, 'text': 'Hi I hope you are doing fine'}

    def gett(self, offset=None):
        url = self.base + "getUpdates?timeout=100"
        if offset:
            url = url + "&offset={}".format(offset + 1)
        a = requests.get(url)
        return a.json()

    def send(self, msg, chat_id):
        url = self.base + "sendMessage?chat_id={}&text={}".format(chat_id, msg)
        if msg is not None:
            requests.get(url)
When I import the above code (bot.py) as follows:
from bot import T_bot

update_id = None

def make_reply(msg):
    reply = 'okay'
    return reply

update_id = None
while True:
    updates = T_bot.gett(self, offset=update_id)
    updates = updates["result"]
    if updates:
        for item in updates:
            update_id = item['update_id']
            try:
                message = item['message']['text']
            except:
                message = None
            fromm = item['message']['from']['id']
            reply = make_reply(message)
            T_bot.send(reply, fromm)
it throws a NameError when I run the main file above:
Traceback (most recent call last):
File "C:\Users\shivam\Desktop\pypy\server.py", line 13, in <module>
updates= T_bot.gett(self, offset= update_id)
NameError: name 'self' is not defined
I understand that I have to instantiate the class first, but how do I do that when I'm importing it from another module? Can somebody please explain?

You will have to instantiate the class first, e.g.:
from bot import T_bot

update_id = None

def make_reply(msg):
    reply = 'okay'
    return reply

update_id = None
instantiated_class = T_bot()
while True:
    updates = instantiated_class.gett(offset=update_id)
    updates = updates["result"]
    if updates:
        for item in updates:
            update_id = item['update_id']
            try:
                message = item['message']['text']
            except:
                message = None
            fromm = item['message']['from']['id']
            reply = make_reply(message)
            instantiated_class.send(reply, fromm)  # call send on the instance as well
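The key point is that gett and send are instance methods, so they have to be called on a T_bot() object; self only exists inside the class's own methods, which is why T_bot.gett(self, ...) raises the NameError in server.py. A minimal sketch of the difference, reusing the T_bot class above:

from bot import T_bot

bot = T_bot()          # __init__ runs and builds the API URLs
updates = bot.gett()   # bound call: Python passes `bot` as `self` automatically

# Calling through the class also works, but only if you pass an instance yourself:
updates = T_bot.gett(bot)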

Related

failed to connect to all addresses grpc in python

I am trying to post data to ArangoDB using gRPC with the script below, but when I run the client I get the above error. My server is running fine, and I am unable to figure out where the error is.
from inspect import trace
import re
from urllib import response
import grpc
import os
import first_pb2_grpc as pb2_grpc
import first_pb2 as pb2
import json
import grpc
from typing import Dict, List
from google.protobuf import json_format
from first_pb2_grpc import *
import traceback

test_data_file_name = "data.json"
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_data_file = os.path.join(curr_dir, test_data_file_name)

def read_json_file(file):
    with open(file, encoding="utf8") as f:
        data = json.load(f)
    return data

test_data = read_json_file(test_data_file)
channel = grpc.insecure_channel('localhost:31024')
stub = pb2_grpc.UnaryStub(channel)

def test(request):
    try:
        response = stub.GetServerResponse(request)
        print(response, 'AAAAA')
        return response
    except Exception as e:
        return str(e)

def test_add_name(message):
    try:
        request = pb2.Message(
            message=message
        )
        test_response = test(request)
        return test_response
    except Exception as e:
        traceback.print_exc()
        return str(e)

if __name__ == "__main__":
    message = test_data["message"]
    # attribute_val = json_format.Parse(json.dumps(name), message='hi')
    api_response = test_add_name(message)
    print(api_response)
Please tell me how I can resolve this.
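"failed to connect to all addresses" is raised by the channel rather than by the request code: it means gRPC never reached a server at the target address. A minimal diagnostic sketch (not from the question) for checking the connection before calling the stub, assuming the same localhost:31024 target:

import grpc

channel = grpc.insecure_channel('localhost:31024')  # target taken from the question
try:
    # Block until the channel actually connects, or give up after 5 seconds.
    grpc.channel_ready_future(channel).result(timeout=5)
    print("channel is ready, stub calls should reach the server")
except grpc.FutureTimeoutError:
    print("could not connect: check the server's host/port and that it is listening")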

Reddit and Twitter bot in Python using PRAW

I am a beginner in Python and am trying to make a bot that automatically tweets anything posted on a subreddit that I have made.
I took help from some online tutorials, which have the following code:
import praw
import json
import requests
import tweepy
import time

access_token = '************************************'
access_token_secret = '************************************'
consumer_key = '************************************'
consumer_secret = '************************************'

def strip_title(title):
    if len(title) == 94:
        return title
    else:
        return title[:93] + "..."

def tweet_creator(subreddit_info):
    post_dict = {}
    post_ids = []
    print("[bot] Getting posts from Reddit")
    for submission in subreddit_info.get_hot(limit=20):
        post_dict[strip_title(submission.title)] = submission.url
        post_ids.append(submission.id)
    print("[bot] Generating short link using goo.gl")
    mini_post_dict = {}
    for post in post_dict:
        post_title = post
        post_link = post_dict[post]
        short_link = shorten(post_link)
        mini_post_dict[post_title] = short_link
    return mini_post_dict, post_ids
def setup_connection_reddit(subreddit):
    print("[bot] setting up connection with Reddit")
    r = praw.Reddit(' %s' % (subreddit))
    subreddit = r.get_subreddit(subreddit)
    return subreddit

def shorten(url):
    headers = {'content-type': 'application/json'}
    payload = {"longUrl": url}
    url = "https://www.googleapis.com/urlshortener/v1/url"
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    link = json.loads(r.text)['id']
    return link

def duplicate_check(id):
    found = 0
    with open('posted_posts.txt', 'r') as file:
        for line in file:
            if id in line:
                found = 1
    return found

def add_id_to_file(id):
    with open('posted_posts.txt', 'a') as file:
        file.write(str(id) + "\n")
def main():
    subreddit = setup_connection_reddit('*Name of the subreddit*')
    post_dict, post_ids = tweet_creator(subreddit)
    tweeter(post_dict, post_ids)

def tweeter(post_dict, post_ids):
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    for post, post_id in zip(post_dict, post_ids):
        found = duplicate_check(post_id)
        if found == 0:
            print("[bot] Posting this link on twitter")
            print(post + " " + post_dict[post] + " #Python #reddit #bot")
            api.update_status(post + " " + post_dict[post] + " #Python #reddit #bot")
            add_id_to_file(post_id)
            time.sleep(30)
        else:
            print("[bot] Already posted")

if __name__ == '__main__':
    main()
The code seems fine in PyCharm; however, I am getting the following error when I try to run it directly from the folder via Terminal using the following command (reddit_bot2.py is my file name):
python3 reddit_bot2.py
When I try to run the code I am getting the following error:
mahesh#Maheshs-MacBook-Air Atoms % python3 reddit_bot2.py
[bot] setting up connection with Reddit
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/configparser.py", line 846, in items
d.update(self._sections[section])
KeyError: '**Name of the subreddit to fetch posts from**'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/mahesh/Python_Bot/Atoms/reddit_bot2.py", line 82, in <module>
main()
File "/Users/mahesh/Python_Bot/Atoms/reddit_bot2.py", line 62, in main
subreddit = setup_connection_reddit('Bot167')
File "/Users/mahesh/Python_Bot/Atoms/reddit_bot2.py", line 36, in setup_connection_reddit
r = praw.Reddit(' %s' %(subreddit))
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/praw/reddit.py", line 227, in __init__
self.config = Config(
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/praw/config.py", line 85, in __init__
self.custom = dict(Config.CONFIG.items(site_name), **settings)
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/configparser.py", line 849, in items
raise NoSectionError(section)
configparser.NoSectionError: No section: ' Bot167'
You provided the name of a praw.ini configuration which does not exist.
For help with creating a Reddit instance, visit
https://praw.readthedocs.io/en/latest/code_overview/reddit_instance.html
For help on configuring PRAW, visit
https://praw.readthedocs.io/en/latest/getting_started/configuration.html
Any help in this regard would be highly appreciated.
Thanks :)
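For context on the error: the first positional argument to praw.Reddit() is treated as the name of a site section in praw.ini, so praw.Reddit(' Bot167') looks for a section called " Bot167" and fails with NoSectionError. A minimal sketch of creating the instance with keyword credentials instead (placeholder values, and assuming a current PRAW release where subreddits are accessed via reddit.subreddit(...)):

import praw

# Placeholder credentials: register a script app at https://www.reddit.com/prefs/apps
reddit = praw.Reddit(
    client_id="YOUR_CLIENT_ID",
    client_secret="YOUR_CLIENT_SECRET",
    user_agent="reddit-to-twitter bot by u/YOUR_USERNAME",
)

# In current PRAW, hot submissions are fetched through the instance:
for submission in reddit.subreddit("YOUR_SUBREDDIT").hot(limit=20):
    print(submission.title, submission.url)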

error while importing images to odoo 11 using python script

I am trying to import images to Odoo 11 using a Python script. Earlier I worked on a laptop running Windows 7 and this script worked fine. Now I have upgraded my laptop to Windows 10, and when I run the same script I am facing a few errors.
Here is my script:
import csv
from pprint import pprint
import xmlrpc.client as xmlrpclib

class OpenERPXMLRPC():
    # def __init__(self, host="0.0.0.0", port="8088", db="demo",
    #              user="admin",
    #              password="admin"):
    def __init__(self, host="205.147.98.219", port="", db="BUILDSTATION_ROMFORD",
                 user="tina.santhosh#gmail.com",
                 password="buildstation1234*"):
        common_url = "http://%s:%s/xmlrpc/common" % (host, port)
        object_url = "http://%s:%s/xmlrpc/object" % (host, port)
        com_sock = xmlrpclib.ServerProxy(common_url)
        uid = com_sock.login(db, user, password)
        if uid:
            self.uid = uid
            self.password = password
            self.db = db
        else:
            print("Error in Authentication")
        self.sock = xmlrpclib.ServerProxy(object_url)

    def execute(self, model, method, *args):
        res = self.sock.execute(self.db, self.uid, self.password, model,
                                method, *args)
        return res

oe = OpenERPXMLRPC(db="BUILDSTATION_ROMFORD")
application = csv.reader(open('C:\\Users\\Asus\\Desktop\\test1.csv'))

for rec in application:
    fields = rec
    break

all_datas = []
count = 1
for rec in application:
    all_datas.append(rec)

count = 0
all_error = []
for rec in all_datas:
    count += 1
    print(rec)
    product_id = oe.execute(
        'product.template',
        'search',
        [('name', '=', rec[0])])
    print("product_name--", product_id)
    with open(rec[1], 'rb') as image:
        image_base64 = image.read().encode("base64")
    vals = {
        'name': rec[0],
        'image_medium': image_base64
    }
    oe.execute(
        'product.template',
        'write',
        product_id,
        vals)
I have created a separate file called test1.csv where I have uploaded only the product name and image location.
Here is the error that I am getting:
C:\Users\Asus>python c:\users\asus\desktop\final_import.py
['Airbrick Black', 'E:\\compressed images for odoo\\building materials\\airblocks\\ti0014.jpg']
product_name-- [4071]
Traceback (most recent call last):
File "c:\users\asus\desktop\final_import.py", line 55, in <module>
image_base64 = image.read().encode("base64")
AttributeError: 'bytes' object has no attribute 'encode'
Any help here would be much appreciated.
Thanks,
Tina
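The traceback points at image.read().encode("base64"): in Python 3, bytes objects have no encode method and the "base64" codec is gone, so the encoding has to go through the base64 module instead. A sketch of that part of the loop under that assumption (the row layout and the image_medium field name are taken from the question):

import base64

# Example row in the same shape as the CSV in the question: [product name, image path]
rec = ['Airbrick Black', 'E:\\compressed images for odoo\\building materials\\airblocks\\ti0014.jpg']

with open(rec[1], 'rb') as image:
    # Encode the raw bytes and decode to str so the value can travel over XML-RPC.
    image_base64 = base64.b64encode(image.read()).decode('ascii')

vals = {
    'name': rec[0],
    'image_medium': image_base64,
}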

Python 3.6 : TypeError: 'module' object is not callable

I was trying to run this module, but it gives me this error:
Traceback (most recent call last):
File "C:/Python36/django file/mysite/personal\crawler.py", line 105, in crawl
self._crawl([u_parse.path], self.depth)
File "C:/Python36/django file/mysite/personal\crawler.py", line 132, in _crawl
html = self.get(url)
File "C:/Python36/django file/mysite/personal\crawler.py", line 117, in get
page = self.curl(url)
File "C:/Python36/django file/mysite/personal\crawler.py", line 144, in curl
req = urllib.request('%s://%s%s' % (self.scheme, self.domain, url))
TypeError: 'module' object is not callable
I'm trying to run a search engine for my web project, and it appears I have done something wrong in the code.
Here's the code for crawler.py:
import sqlite3
import urllib.request
import urllib.error
from html.parser import HTMLParser
from urllib.parse import urlparse

class HREFParser(HTMLParser):
    """
    Parser that extracts hrefs
    """
    hrefs = set()

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            dict_attrs = dict(attrs)
            if dict_attrs.get('href'):
                self.hrefs.add(dict_attrs['href'])

def get_local_links(html, domain):
    """
    Read through HTML content and returns a tuple of links
    internal to the given domain
    """
    hrefs = set()
    parser = HREFParser()
    parser.feed(html)
    for href in parser.hrefs:
        u_parse = urlparse(href)
        if href.startswith('/'):
            # purposefully using path, no query, no hash
            hrefs.add(u_parse.path)
        else:
            # only keep the local urls
            if u_parse.netloc == domain:
                hrefs.add(u_parse.path)
    return hrefs
class CrawlerCache(object):
    """
    Crawler data caching per relative URL and domain.
    """
    def __init__(self, db_file):
        self.conn = sqlite3.connect(db_file)
        c = self.conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS sites
                     (domain text, url text, content text)''')
        self.conn.commit()
        self.cursor = self.conn.cursor()

    def set(self, domain, url, data):
        """
        store the content for a given domain and relative url
        """
        self.cursor.execute("INSERT INTO sites VALUES (?,?,?)",
                            (domain, url, data))
        self.conn.commit()

    def get(self, domain, url):
        """
        return the content for a given domain and relative url
        """
        self.cursor.execute("SELECT content FROM sites WHERE domain=? and url=?",
                            (domain, url))
        row = self.cursor.fetchone()
        if row:
            return row[0]

    def get_urls(self, domain):
        """
        return all the URLS within a domain
        """
        self.cursor.execute("SELECT url FROM sites WHERE domain=?", (domain,))
        # could use fetchone and yield but I want to release
        # my cursor after the call. I could have create a new cursor tho.
        # ...Oh well
        return [row[0] for row in self.cursor.fetchall()]
class Crawler(object):
    def __init__(self, cache=None, depth=2):
        """
        depth: how many time it will bounce from page one (optional)
        cache: a basic cache controller (optional)
        """
        self.depth = depth
        self.content = {}
        self.cache = cache

    def crawl(self, url, no_cache=None):
        """
        url: where we start crawling, should be a complete URL like
        'http://www.intel.com/news/'
        no_cache: function returning True if the url should be refreshed
        """
        u_parse = urlparse(url)
        self.domain = u_parse.netloc
        self.content[self.domain] = {}
        self.scheme = u_parse.scheme
        self.no_cache = no_cache
        self._crawl([u_parse.path], self.depth)

    def set(self, url, html):
        self.content[self.domain][url] = html
        if self.is_cacheable(url):
            self.cache.set(self.domain, url, html)

    def get(self, url):
        page = None
        if self.is_cacheable(url):
            page = self.cache.get(self.domain, url)
        if page is None:
            page = self.curl(url)
        else:
            print("cached url... [%s] %s" % (self.domain, url))
        return page

    def is_cacheable(self, url):
        return self.cache and self.no_cache \
            and not self.no_cache(url)

    def _crawl(self, urls, max_depth):
        n_urls = set()
        if max_depth:
            for url in urls:
                # do not crawl twice the same page
                if url not in self.content:
                    html = self.get(url)
                    self.set(url, html)
                    n_urls = n_urls.union(get_local_links(html, self.domain))
            self._crawl(n_urls, max_depth - 1)
    def curl(self, url):
        """
        return content at url.
        return empty string if response raise an HTTPError (not found, 500...)
        """
        try:
            print("retrieving url... [%s] %s" % (self.domain, url))
            req = urllib.request('%s://%s%s' % (self.scheme, self.domain, url))
            response = urllib.urlopen(req)
            return response.read().decode('ascii', 'ignore')
        except urllib.error.HTTPError as e:
            print("error [%s] %s: %s" % (self.domain, url, e))
            return ''
And here is the test run file "run.py":
import re
from crawler import Crawler, CrawlerCache

if __name__ == "__main__":
    # Using SQLite as a cache to avoid pulling twice
    crawler = Crawler(CrawlerCache('crawler.db'))
    root_re = re.compile('^/$').match
    crawler.crawl('http://techcrunch.com/', no_cache=root_re)
    crawler.crawl('http://www.engadget.com/', no_cache=root_re)
    crawler.crawl('http://gizmodo.com/', no_cache=root_re)
    crawler.crawl('http://www.zdnet.com/', no_cache=root_re)
    crawler.crawl('http://www.wired.com/', no_cache=root_re)
I tried to fix it, but it still gives me the same error.
Can anyone please help me out?
The problem is in the curl method:
def curl(self, url):
    """
    return content at url.
    return empty string if response raise an HTTPError (not found, 500...)
    """
    try:
        print("retrieving url... [%s] %s" % (self.domain, url))
        # You're calling a module here. urllib.request is not a function
        req = urllib.request('%s://%s%s' % (self.scheme, self.domain, url))
        response = urllib.urlopen(req)
        return response.read().decode('ascii', 'ignore')
    except urllib.error.HTTPError as e:
        print("error [%s] %s: %s" % (self.domain, url, e))
        return ''
Replace the urllib.request line with a urllib.request.urlopen call:
def curl(self, url):
    """
    return content at url.
    return empty string if response raise an HTTPError (not found, 500...)
    """
    try:
        print("retrieving url... [%s] %s" % (self.domain, url))
        # urllib.request.urlopen opens the URL directly and returns the response
        response = urllib.request.urlopen('%s://%s%s' % (self.scheme, self.domain, url))
        return response.read().decode('ascii', 'ignore')
    except urllib.error.HTTPError as e:
        print("error [%s] %s: %s" % (self.domain, url, e))
        return ''
You are trying to call a module:
req = urllib.request('%s://%s%s' % (self.scheme, self.domain, url))
You are looking for the class Request:
req = urllib.request.Request('%s://%s%s' % (self.scheme, self.domain, url))
All of this is in crawler.py, at line 144.
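To make the two suggestions concrete, here is a small standalone sketch (placeholder URL, not part of the original answer) showing a Request object being built and then fetched with urllib.request.urlopen:

import urllib.request
import urllib.error

url = 'http://example.com/'  # placeholder

try:
    req = urllib.request.Request(url)        # build the request object
    response = urllib.request.urlopen(req)   # perform the actual HTTP call
    print(response.read().decode('ascii', 'ignore')[:200])
except urllib.error.HTTPError as e:
    print("error fetching %s: %s" % (url, e))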

AttributeError: 'module' object has no attribute 'ensure_future'

Hi, I am writing a network-bound server application using Python asyncio which can accept a POST request.
In the POST request I am accepting a symbol parameter.
Please tell me the best way to deal with a network-bound application where I am collecting the data from other web APIs by sending POST requests to them.
Following is the code:
import asyncio
import aiohttp
import json
import logging

# async def fetch_content(url, symbols):
#     yield from aiohttp.post(url, symbols=symbols)

@asyncio.coroutine
def fetch_page(writer, url, data):
    response = yield from aiohttp.post(url, data=data)
    resp = yield from response.read_and_close()
    print(resp)
    writer.write(resp)
    return

@asyncio.coroutine
def process_payload(writer, data, scale):
    tasks = []
    data = data.split('\r\n\r\n')[1]
    data = data.split('\n')
    data = [x.split(':') for x in data]
    print(data)
    data = {x[0]: x[1] for x in data}
    print(data)
    # data = data[0].split(':')[1]
    data = data['symbols']
    print(data)
    data = data.split(',')
    data_len = len(data)
    data_first = 0
    data_last = scale
    url = 'http://xxxxxx.xxxxxx.xxx/xxxx/xxxx'
    while data_last < data_len:
        tasks.append(asyncio.ensure_future(fetch_page(writer, url, {'symbols': ",".join(data[data_first:data_last])})))
        data_first += scale
        data_last += scale
    tasks.append(asyncio.ensure_future(fetch_page(writer, url, {'symbols': ",".join(data[data_first:data_last])})))
    loop.run_until_complete(tasks)
    return

@asyncio.coroutine
def process_url(url):
    pass

@asyncio.coroutine
def echo_server():
    yield from asyncio.start_server(handle_connection, 'xxxxxx.xxxx.xxx', 3000)

@asyncio.coroutine
def handle_connection(reader, writer):
    data = yield from reader.read(8192)
    if data:
        message = data.decode('utf-8')
        print(message)
        yield from process_payload(writer, message, 400)
    writer.write_eof()
    writer.close()

# url = 'http://XXXXXXX.xxxxx.xxx/xxxx/xxxxxx/xxx'
data = {'symbols': 'GD-US,14174T10,04523Y10,88739910,03209R10,46071F10,77543110,92847N10'}
loop = asyncio.get_event_loop()
loop.run_until_complete(echo_server())
try:
    loop.run_forever()
finally:
    loop.close()
But I am receiving the following error:
future: <Task finished coro=<handle_connection() done, defined at fql_server_async_v2.py:53> exception=AttributeError("'module' object has no attribute 'ensure_future'",)>
Traceback (most recent call last):
File "/home/user/anugupta/lib/python3.4/asyncio/tasks.py", line 234, in _step
result = coro.send(value)
File "fql_server_async_v2.py", line 60, in handle_connection
yield from process_payload(writer, message, 400)
File "/home/user/anugupta/lib/python3.4/asyncio/coroutines.py", line 141, in coro
res = func(*args, **kw)
File "fql_server_async_v2.py", line 41, in process_payload
tasks.append(asyncio.ensure_future(fetch_page(writer, url, {'symbols':",".join(data[data_first:data_last])})))
AttributeError: 'module' object has no attribute 'ensure_future'
^CTraceback (most recent call last):
File "fql_server_async_v2.py", line 72, in <module>
loop.run_forever()
File "/home/user/anugupta/lib/python3.4/asyncio/base_events.py", line 236, in run_forever
self._run_once()
File "/home/user/anugupta/lib/python3.4/asyncio/base_events.py", line 1017, in _run_once
event_list = self._selector.select(timeout)
File "/home/user/anugupta/lib/python3.4/selectors.py", line 424, in select
fd_event_list = self._epoll.poll(timeout, max_ev)
ensure_future was added in asyncio 3.4.4; use asyncio.async() for earlier versions. While async() is deprecated now, it will be supported for the foreseeable future.
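If the script has to run on both old and new asyncio versions, one common pattern (a sketch, not part of the original answer) is to pick the right function once at import time; getattr is used because async became a reserved keyword in later Python versions:

import asyncio

# Prefer ensure_future (asyncio >= 3.4.4); fall back to the old async() otherwise.
# getattr is needed because "async" is a reserved keyword in Python 3.7+, so
# asyncio.async can no longer be written as a plain attribute access there.
try:
    ensure_future = asyncio.ensure_future
except AttributeError:
    ensure_future = getattr(asyncio, "async")

# Usage: schedule a coroutine regardless of the asyncio version, e.g.
# task = ensure_future(fetch_page(writer, url, data))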
