I was wondering if you could help. I'm trying to link a database to an API, but whenever I run it I get the error
sqlite3.OperationalError: no such table: animals_information
I know the table has been created and has contents, because printing it out works, but for some reason when it runs inside this code it comes back with the error. If anyone could help, that'd be great! The entire code is below.
import flask
from flask import Flask, request, jsonify
import sqlite3

app = flask.Flask(__name__)
app.config["DEBUG"] = True


def dict_factory(cursor, row):
    d = {}
    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]
    return d


@app.route('/', methods=['GET'])
def home():
    return '''<h1>Animals Archive </h1>
<p>A prototype API to store animals data for our Code First Girl Project.</p>'''


@app.route('/api/v1/resources/animals/all', methods=['GET'])
def api_all():
    conn = sqlite3.connect('animals_information.db')
    conn.row_factory = dict_factory
    cur = conn.cursor()
    all_animals = cur.execute('SELECT * FROM animals_information;').fetchall()
    return jsonify(all_animals)


@app.errorhandler(404)
def page_not_found(e):
    return "<h1>404</h1><p>The resource could not be found.</p>", 404


@app.route('/api/v1/resources/animals', methods=['GET'])
def api_filter():
    query_parameters = request.args
    id = query_parameters.get('id')
    name = query_parameters.get('name')
    size = query_parameters.get('size')
    fluffiness = query_parameters.get('fluffiness')
    grumpiness = query_parameters.get('grumpiness')
    friendliness = query_parameters.get('friendliness')
    intelligence = query_parameters.get('intelligence')
    mischief = query_parameters.get('mischief')
    cuteness = query_parameters.get('cuteness')

    query = "SELECT * FROM animals_information WHERE"
    to_filter = []
    if id:
        query += ' id=? AND'
        to_filter.append(id)
    if name:
        query += ' name=? AND'
        to_filter.append(name)
    if size:
        query += ' size=? AND'
        to_filter.append(size)
    if fluffiness:
        query += ' fluffiness=? AND'
        to_filter.append(fluffiness)
    if grumpiness:
        query += ' grumpiness=? AND'
        to_filter.append(grumpiness)
    if friendliness:
        query += ' friendliness=? AND'
        to_filter.append(friendliness)
    if intelligence:
        query += ' intelligence=? AND'
        to_filter.append(intelligence)
    if mischief:
        query += ' mischief=? AND'
        to_filter.append(mischief)
    if cuteness:
        query += ' cuteness=? AND'
        to_filter.append(cuteness)
    query = query[:-4] + ';'

    conn = sqlite3.connect('animals_information.db')
    conn.row_factory = dict_factory
    cur = conn.cursor()
    results = cur.execute(query, to_filter).fetchall()
    return jsonify(results)


app.run()
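One thing worth checking: sqlite3.connect() resolves a bare filename against the current working directory and silently creates a new, empty database (with no tables) if the file is not found there. Here is a minimal sketch of connecting through a path anchored next to the script; the get_connection helper and the sqlite3.Row row factory are my own choices, not part of the question's code:

import os
import sqlite3

# Resolve the database next to this script rather than relying on whichever
# directory Flask happens to be started from. With a missing relative path,
# sqlite3.connect() would create a brand-new, empty file instead.
DB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'animals_information.db')

def get_connection():
    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row  # or the dict_factory from the question
    return conn

The two route handlers could then call get_connection() instead of sqlite3.connect('animals_information.db').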
I am new to Textract and trying to extract data from a PDF and view it as key-value pairs. The document has around 15 pages, so I used this asynchronous example to get the data from the PDF, but I am getting this error.
Error:
botocore.errorfactory.InvalidParameterException: An error occurred (InvalidParameterException) when calling the StartDocumentAnalysis operation: Request has invalid parameters
https://docs.aws.amazon.com/textract/latest/dg/async-analyzing-with-sqs.html
import boto3
import json
import sys
import time


class ProcessType:
    DETECTION = 1
    ANALYSIS = 2


class DocumentProcessor:
    jobId = ''
    region_name = ''
    roleArn = ''
    bucket = ''
    document = ''
    sqsQueueUrl = ''
    snsTopicArn = ''
    processType = ''

    def __init__(self, role, bucket, document, region):
        self.roleArn = role
        self.bucket = bucket
        self.document = document
        self.region_name = region
        self.textract = boto3.client('textract', region_name=self.region_name)
        self.sqs = boto3.client('sqs', region_name=self.region_name)
        self.sns = boto3.client('sns', region_name=self.region_name)

    def ProcessDocument(self, type):
        jobFound = False
        self.processType = type
        validType = False
        # Determine which type of processing to perform
        if self.processType == ProcessType.DETECTION:
            response = self.textract.start_document_text_detection(
                DocumentLocation={'S3Object': {'Bucket': self.bucket, 'Name': self.document}},
                NotificationChannel={'RoleArn': self.roleArn, 'SNSTopicArn': self.snsTopicArn})
            print('Processing type: Detection')
            validType = True
        # For document analysis, select which features you want to obtain with the FeatureTypes argument
        if self.processType == ProcessType.ANALYSIS:
            response = self.textract.start_document_analysis(
                DocumentLocation={'S3Object': {'Bucket': self.bucket, 'Name': self.document}},
                FeatureTypes=["TABLES", "FORMS"],
                NotificationChannel={'RoleArn': self.roleArn, 'SNSTopicArn': self.snsTopicArn})
            print('Processing type: Analysis')
            validType = True
        if validType == False:
            print("Invalid processing type. Choose Detection or Analysis.")
            return
        print('Start Job Id: ' + response['JobId'])
        dotLine = 0
        while jobFound == False:
            sqsResponse = self.sqs.receive_message(QueueUrl=self.sqsQueueUrl, MessageAttributeNames=['ALL'],
                                                   MaxNumberOfMessages=10)
            if sqsResponse:
                if 'Messages' not in sqsResponse:
                    if dotLine < 40:
                        print('.', end='')
                        dotLine = dotLine + 1
                    else:
                        print()
                        dotLine = 0
                    sys.stdout.flush()
                    time.sleep(5)
                    continue
                for message in sqsResponse['Messages']:
                    notification = json.loads(message['Body'])
                    textMessage = json.loads(notification['Message'])
                    print(textMessage['JobId'])
                    print(textMessage['Status'])
                    if str(textMessage['JobId']) == response['JobId']:
                        print('Matching Job Found:' + textMessage['JobId'])
                        jobFound = True
                        self.GetResults(textMessage['JobId'])
                        self.sqs.delete_message(QueueUrl=self.sqsQueueUrl,
                                                ReceiptHandle=message['ReceiptHandle'])
                    else:
                        print("Job didn't match:" +
                              str(textMessage['JobId']) + ' : ' + str(response['JobId']))
                        # Delete the unknown message. Consider sending to dead letter queue
                        self.sqs.delete_message(QueueUrl=self.sqsQueueUrl,
                                                ReceiptHandle=message['ReceiptHandle'])
        print('Done!')

    def CreateTopicandQueue(self):
        millis = str(int(round(time.time() * 1000)))
        # Create SNS topic
        snsTopicName = "AmazonTextractTopic" + millis
        topicResponse = self.sns.create_topic(Name=snsTopicName)
        self.snsTopicArn = topicResponse['TopicArn']
        # Create SQS queue
        sqsQueueName = "AmazonTextractQueue" + millis
        self.sqs.create_queue(QueueName=sqsQueueName)
        self.sqsQueueUrl = self.sqs.get_queue_url(QueueName=sqsQueueName)['QueueUrl']
        attribs = self.sqs.get_queue_attributes(QueueUrl=self.sqsQueueUrl,
                                                AttributeNames=['QueueArn'])['Attributes']
        sqsQueueArn = attribs['QueueArn']
        # Subscribe SQS queue to SNS topic
        self.sns.subscribe(
            TopicArn=self.snsTopicArn,
            Protocol='sqs',
            Endpoint=sqsQueueArn)
        # Authorize SNS to write to the SQS queue
        policy = """{{
  "Version":"2012-10-17",
  "Statement":[
    {{
      "Sid":"MyPolicy",
      "Effect":"Allow",
      "Principal" : {{"AWS" : "*"}},
      "Action":"SQS:SendMessage",
      "Resource": "{}",
      "Condition":{{
        "ArnEquals":{{
          "aws:SourceArn": "{}"
        }}
      }}
    }}
  ]
}}""".format(sqsQueueArn, self.snsTopicArn)
        response = self.sqs.set_queue_attributes(
            QueueUrl=self.sqsQueueUrl,
            Attributes={
                'Policy': policy
            })

    def DeleteTopicandQueue(self):
        self.sqs.delete_queue(QueueUrl=self.sqsQueueUrl)
        self.sns.delete_topic(TopicArn=self.snsTopicArn)

    # Display information about a block
    def DisplayBlockInfo(self, block):
        print("Block Id: " + block['Id'])
        print("Type: " + block['BlockType'])
        if 'EntityTypes' in block:
            print('EntityTypes: {}'.format(block['EntityTypes']))
        if 'Text' in block:
            print("Text: " + block['Text'])
        if block['BlockType'] != 'PAGE' and 'Confidence' in block:
            print("Confidence: " + "{:.2f}".format(block['Confidence']) + "%")
            print('Page: {}'.format(block['Page']))
        if block['BlockType'] == 'CELL':
            print('Cell Information')
            print('\tColumn: {} '.format(block['ColumnIndex']))
            print('\tRow: {}'.format(block['RowIndex']))
            print('\tColumn span: {} '.format(block['ColumnSpan']))
            print('\tRow span: {}'.format(block['RowSpan']))
            if 'Relationships' in block:
                print('\tRelationships: {}'.format(block['Relationships']))
        if "Geometry" in str(block):
            print('Geometry')
            print('\tBounding Box: {}'.format(block['Geometry']['BoundingBox']))
            print('\tPolygon: {}'.format(block['Geometry']['Polygon']))
        if block['BlockType'] == 'SELECTION_ELEMENT':
            print('    Selection element detected: ', end='')
            if block['SelectionStatus'] == 'SELECTED':
                print('Selected')
            else:
                print('Not selected')
        if block["BlockType"] == "QUERY":
            print("Query info:")
            print(block["Query"])
        if block["BlockType"] == "QUERY_RESULT":
            print("Query answer:")
            print(block["Text"])

    def GetResults(self, jobId):
        maxResults = 1000
        paginationToken = None
        finished = False
        while finished == False:
            response = None
            if self.processType == ProcessType.ANALYSIS:
                if paginationToken == None:
                    response = self.textract.get_document_analysis(JobId=jobId,
                                                                   MaxResults=maxResults)
                else:
                    response = self.textract.get_document_analysis(JobId=jobId,
                                                                   MaxResults=maxResults,
                                                                   NextToken=paginationToken)
            if self.processType == ProcessType.DETECTION:
                if paginationToken == None:
                    response = self.textract.get_document_text_detection(JobId=jobId,
                                                                         MaxResults=maxResults)
                else:
                    response = self.textract.get_document_text_detection(JobId=jobId,
                                                                         MaxResults=maxResults,
                                                                         NextToken=paginationToken)
            blocks = response['Blocks']
            print('Detected Document Text')
            print('Pages: {}'.format(response['DocumentMetadata']['Pages']))
            # Display block information
            for block in blocks:
                self.DisplayBlockInfo(block)
                print()
                print()
            if 'NextToken' in response:
                paginationToken = response['NextToken']
            else:
                finished = True

    def GetResultsDocumentAnalysis(self, jobId):
        maxResults = 1000
        paginationToken = None
        finished = False
        while finished == False:
            response = None
            if paginationToken == None:
                response = self.textract.get_document_analysis(JobId=jobId,
                                                               MaxResults=maxResults)
            else:
                response = self.textract.get_document_analysis(JobId=jobId,
                                                               MaxResults=maxResults,
                                                               NextToken=paginationToken)
            # Get the text blocks
            blocks = response['Blocks']
            print('Analyzed Document Text')
            print('Pages: {}'.format(response['DocumentMetadata']['Pages']))
            # Display block information
            for block in blocks:
                self.DisplayBlockInfo(block)
                print()
                print()
            if 'NextToken' in response:
                paginationToken = response['NextToken']
            else:
                finished = True


def main():
    bucket = 'poyocr'
    document = '018394d5-0dd80d.pdf'
    region_name = 'us-west-1'
    roleArn = 'an:as:iam::58:usr/tect'
    analyzer = DocumentProcessor(roleArn, bucket, document, region_name)
    analyzer.CreateTopicandQueue()
    analyzer.ProcessDocument(ProcessType.ANALYSIS)
    analyzer.DeleteTopicandQueue()


if __name__ == "__main__":
    main()
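Since the failure happens at the StartDocumentAnalysis call itself, here is a hedged pre-flight sketch (the preflight_check function is my own, not part of the AWS sample) that verifies the S3 object reference the call is given; Textract also generally expects the bucket to live in the same region as the Textract client:

import boto3
from botocore.exceptions import ClientError

def preflight_check(bucket, document, region):
    # Confirm the S3 object exists and is readable, and that the bucket's region
    # matches the region the Textract client will be created in.
    s3 = boto3.client('s3', region_name=region)
    try:
        s3.head_object(Bucket=bucket, Key=document)
    except ClientError as err:
        print('S3 object check failed:', err)
        return False
    location = s3.get_bucket_location(Bucket=bucket).get('LocationConstraint') or 'us-east-1'
    if location != region:
        print('Bucket region {} differs from Textract region {}'.format(location, region))
        return False
    return True

# Example, using the values from main() above:
# preflight_check('poyocr', '018394d5-0dd80d.pdf', 'us-west-1')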
I am writing an Azure Python function which reads emails and writes them to an Azure SQL database. The code runs fine until it handles an email with a picture in the body. Then the program stops and returns this message:
Starting worker process:py "C:\ProgramData\chocolatey\lib\azure-functions-core-tools-3\tools\workers\python\3.8/WINDOWS/X64/worker.py" --host 127.0.0.1 --port 65101 --workerId 773c7b91-5c39-4d22-8509-2baea30124bb --requestId 0260fedc-f842-4dba-a2e9-6c7630f7228a --grpcMaxMessageLength 2147483647
Then the Azure Python function hangs on that type of email and stops working. I tried building a try/except with continue to ensure it picks up the next email, as well as a check on the length of the email body, but none of these workarounds is working.
My question is: how can I solve or skip this problem so the function can also handle or recognize emails which have a picture in the body?
ReadEmail.py
import logging
import email
import imaplib
import pyodbc
from datetime import datetime
from raFunctions import uitvullenDatum

# def readEmail(EMAIL_ADRES,EMAIL_PASSWORD,EMAIL_SERVER,DB_SERVER,DB_DATABASE,DB_USER,DB_PASSWORD):
logging.info('ReadEmail start')
# logging.info('ReadEmail pars:' + EMAIL_ADRES + ' ' + EMAIL_PASSWORD + ' ' + EMAIL_SERVER + ' ' + DB_SERVER + ' ' + DB_DATABASE + ' ' + DB_USER + ' ' + DB_PASSWORD )

# Configurations
EMAIL = 'xxxxxxx'
PASSWORD = 'xxxxxx'
SERVER = 'xxxxxx'
server = 'xxxxxx'
database = 'xxxxx'
username = 'xxxx'
password = 'xxxxx'
driver = '{ODBC Driver 17 for SQL Server}'

# Connection settings to Database and Email Server
mail = imaplib.IMAP4_SSL(SERVER)
mail.login(EMAIL, PASSWORD)
# cnxn = pyodbc.connect('DRIVER='+driver+';SERVER='+server+';PORT=1433;DATABASE='+database+';UID='+username+';PWD='+ password +';Authentication=ActiveDirectoryPassword')
cnxn = pyodbc.connect('DRIVER='+driver+';SERVER='+server+';PORT=1433;DATABASE='+database+';UID='+username+';PWD='+ password +'; autocommit=True')
cursor = cnxn.cursor()
cursor_max_mail_date = cnxn.cursor()
max_mail_date = ""

try:
    _sql_max_datum = "select format(max(o.ReceivingDate),'dd-MMM-yyyy') from wp_ra.ontvangenmail o "
    cursor_max_mail_date.execute(_sql_max_datum)
    rows = cursor_max_mail_date.fetchall()
    for row in rows:
        datum = str(row[0])
        max_mail_date = datum
except Exception as e:
    print('Max_Mail_Date Error:')
    logging.error(str(e))

try:
    mail.select('inbox')
    # status, data = mail.search(None, 'ALL')
    logging.info(max_mail_date)
    status, data = mail.search(None, '(SINCE "19-May-2020")')
    mail_ids = []
    for block in data:
        try:
            mail_ids += block.split()
        except:
            print('Error in block in data')
            continue
    for i in mail_ids:
        try:
            status, data = mail.fetch(i, '(RFC822)')
            for response_part in data:
                if isinstance(response_part, tuple):
                    try:
                        message = email.message_from_bytes(response_part[1])
                        mail_from = message['From']
                        mail_subject = message['Subject']
                        mail_date = message['Date']
                        print('mail_date: ' + mail_date + ' mail_subject: ' + mail_subject)
                        if message.is_multipart():
                            mail_content = ''
                            for part in message.get_payload():
                                if part.get_content_type() == 'text/plain':
                                    mail_content += part.get_payload()
                                else:
                                    # note: non-text parts (e.g. images) overwrite mail_content here
                                    mail_content = part.get_payload()
                        else:
                            mail_content = message.get_payload()
                        maildate = email.utils.parsedate(mail_date)
                        maildate = uitvullenDatum(str(maildate[0]), str(maildate[1]), str(maildate[2]), str(maildate[3]), str(maildate[4]))
                        print("Check Mail Content")
                        if len(mail_content) >= 1147483647:
                            print("Niet in te lezen email: maildate: " + str(mail_date) + ' mail subject: ' + mail_subject)  # "Email could not be read"
                        else:
                            values = (maildate, mail_from, mail_subject, mail_content)
                            sql = "EXEC wp_ra.invoerenmail ?, ?, ?, ?"
                            cursor.execute(sql, (values))
                            cursor.commit()
                            logging.info("Data saved on the database")
                    except Exception as e:
                        print('Error reading mail:')
                        print(str(e))
                        continue
        except:
            print('Error i in mail_ids')
            continue
except:
    print("Fout bij het inlezen van de mail!")  # "Error while reading the mail!"
If you get stuck on a particular email, you can try using continue after printing the current exception, so that the email is skipped and the loop moves on to the others:
for i in s:
    # Some logic here
    try:
        pass  # Do something here
    except:
        print('print something in this place.')
        continue
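Separately, since the hang shows up on messages with a picture in the body, here is a minimal standalone sketch (my own helper, built on the same standard-library email module the question already uses) that keeps only the text/plain parts and skips image and attachment parts instead of letting them overwrite mail_content:

import email

def extract_text_body(message):
    # Collect only the text/plain parts; images and attachments are skipped
    # instead of being assigned to mail_content as in the question's loop.
    parts = []
    if message.is_multipart():
        for part in message.walk():
            if part.get_content_type() == 'text/plain' and part.get('Content-Disposition') is None:
                payload = part.get_payload(decode=True)
                if payload:
                    parts.append(payload.decode(part.get_content_charset() or 'utf-8', errors='replace'))
    else:
        payload = message.get_payload(decode=True)
        if payload:
            parts.append(payload.decode(message.get_content_charset() or 'utf-8', errors='replace'))
    return '\n'.join(parts)

# usage inside the question's loop:
# message = email.message_from_bytes(response_part[1])
# mail_content = extract_text_body(message)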
This question already has answers here:
How can I call a function within a class?
(2 answers)
Closed 2 years ago.
I tried to call a function from within another function, but it returns an error.
class Data_Wrangling:
    def __init__(self):
        self.ClassName = type(self).__name__ + '.'
        self.Host = None
        self.App_Id = None
        self.App_Secret = None
        self.PySpace = None

    def Data_Retrieve(self, URL):
        print('\n<> ' + inspect.currentframe().f_code.co_name)
        JsonData = None
        ST = time.time()
        while JsonData is None:
            try:
                Response = requests.get(URL)
            except Exception as E:
                print('\n>> Failed: ' + str(E))
                if (time.time() - ST)/60 > 1:
                    print('\n>> Failed: NO valid JsonData Retrieved - Source Down or Internet Issue, etc')
                    break
                JsonData = None
                continue
            if Response.status_code == 200:
                try:
                    JsonData = json.loads(Response.content.decode('utf-8'))
                except Exception as E:
                    print('\n>> Failed: ' + str(E))
                    JsonData = None
                    continue
        KN.TPE(ST)
        return JsonData

    def Processing(self, DatasetId):
        Start_Date = '1900-01'
        today = datetime.today()
        End_Range = str(today.year) + '-' + str(today.month)
        Limit = 5000
        Page = 1
        URL_Part1 = 'https://maps.clb.org.hk/map/pageList?keyword=&startDate='
        URL_Part2 = '&endDate='
        URL_Part3 = '&address=&industry=&parentIndustry=&industryName=&accidentType=&accidentTypeName=&companyOwnership=&companyOwnershipName=&deathsNumber=&deathsNumberName=&injuriesNumber=&injuriesNumberName=&mapType=2&page='
        URL_Part4 = '&limit='
        URL = URL_Part1 + Start_Date + URL_Part2 + End_Range + URL_Part3 + str(Page) + URL_Part4 + str(Limit)
        JsonData = Data_Retrieve(URL)


DW = Data_Wrangling()
Export_FilePath = DW.Processing('dtihwnb')
I receive the error:
JsonData = Data_Retrieve(URL)
NameError: name 'Data_Retrieve' is not defined
I am stuck here, wondering what mistake I have made. Please help; I am calling Data_Retrieve in Processing.
You need to call the Data_Retrieve method through self, because it is defined inside the class.
Try calling your method like this:
JsonData = self.Data_Retrieve(URL)
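To make the point concrete, here is a tiny standalone example (the names are made up) of one method calling another through self:

class Example:
    def helper(self, x):
        return x * 2

    def run(self, x):
        # helper is defined on the class, so it has to be reached through self
        return self.helper(x)

print(Example().run(3))  # prints 6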
I am getting the error below when downloading files using multiprocessing. I am downloading Wikipedia page views, and they are stored per hour, so it can involve a lot of downloads.
Any recommendation as to why this error is caused and how to solve it? Thanks.
MaybeEncodingError: Error sending result: ''. Reason: 'TypeError("cannot serialize '_io.BufferedReader' object",)'
import fnmatch
import requests
import urllib.request
from bs4 import BeautifulSoup
import multiprocessing as mp


def download_it(download_file):
    global path_to_save_document
    filename = download_file[download_file.rfind("/")+1:]
    save_file_w_submission_path = path_to_save_document + filename
    request = urllib.request.Request(download_file)
    response = urllib.request.urlopen(request)
    data_content = response.read()
    with open(save_file_w_submission_path, 'wb') as wf:
        wf.write(data_content)
    print(save_file_w_submission_path)


pattern = r'*200801*'
url_to_download = r'https://dumps.wikimedia.org/other/pagecounts-raw/'
path_to_save_document = r'D:\Users\Jonathan\Desktop\Wikipedia\\'


def main():
    global pattern
    global url_to_download
    r = requests.get(url_to_download)
    data = r.text
    soup = BeautifulSoup(data, features="lxml")
    list_of_href_year = []
    for i in range(2):
        if i == 0:
            for link in soup.find_all('a'):
                lien = link.get('href')
                if len(lien) == 4:
                    list_of_href_year.append(url_to_download + lien + '/')
        elif i == 1:
            list_of_href_months = []
            list_of_href_pageviews = []
            for loh in list_of_href_year:
                r = requests.get(loh)
                data = r.text
                soup = BeautifulSoup(data, features="lxml")
                for link in soup.find_all('a'):
                    lien = link.get('href')
                    if len(lien) == 7:
                        list_of_href_months.append(loh + lien + '/')
            if not list_of_href_months:
                continue
            for lohp in list_of_href_months:
                r = requests.get(lohp)
                data = r.text
                soup = BeautifulSoup(data, features="lxml")
                for link in soup.find_all('a'):
                    lien = link.get('href')
                    if "pagecounts" in lien:
                        list_of_href_pageviews.append(lohp + lien)
    matching_list_of_href = fnmatch.filter(list_of_href_pageviews, pattern)
    matching_list_of_href.sort()
    with mp.Pool(mp.cpu_count()) as p:
        print(p.map(download_it, matching_list_of_href))


if __name__ == '__main__':
    main()
As Darkonaut proposed, I used multithreading instead.
Example:
from multiprocessing.dummy import Pool as ThreadPool


'''This function is used for the download the files using multi threading'''
def multithread_download_files_func(self, download_file):
    try:
        filename = download_file[download_file.rfind("/")+1:]
        save_file_w_submission_path = self.ptsf + filename
        '''Check if the download doesn't already exists. If not, proceed otherwise skip'''
        if not os.path.exists(save_file_w_submission_path):
            data_content = None
            try:
                '''Lets download the file'''
                request = urllib.request.Request(download_file)
                response = urllib.request.urlopen(request)
                data_content = response.read()
            except urllib.error.HTTPError:
                '''We will do a retry on the download if the server is temporarily unavailable'''
                retries = 1
                success = False
                while not success:
                    try:
                        '''Make another request if the previous one failed'''
                        response = urllib.request.urlopen(download_file)
                        data_content = response.read()
                        success = True
                    except Exception:
                        '''We will make the program wait a bit before sending another request to download the file'''
                        wait = retries * 5
                        time.sleep(wait)
                        retries += 1
            except Exception as e:
                print(str(e))
            '''If the response data is not empty, we will write as a new file and stored in the data lake folder'''
            if data_content:
                with open(save_file_w_submission_path, 'wb') as wf:
                    wf.write(data_content)
                print(self.present_extract_RC_from_RS + filename)
    except Exception as e:
        print('funct multithread_download_files_func' + str(e))


'''This function is used as a wrapper before using multi threading in order to download the files to be stored in the Data Lake'''
def download_files(self, filter_files, url_to_download, path_to_save_file):
    try:
        self.ptsf = path_to_save_file = path_to_save_file + 'Step 1 - Data Lake\Wikipedia Pagecounts\\'
        filter_files_df = filter_files
        self.filter_pattern = filter_files
        self.present_extract_RC_from_RS = 'WK Downloaded-> '
        if filter_files_df == '*':
            '''We will create a string of all the years concatenated together for later use in this program'''
            reddit_years = [2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]
            filter_files_df = ''
            '''Go through the years from 2005 to 2018'''
            for idx, ry in enumerate(reddit_years):
                filter_files_df += '*' + str(ry) + '*'
                if (idx != len(reddit_years)-1):
                    filter_files_df += '&'
        download_filter = list([x.strip() for x in filter_files_df.split('&')])
        download_filter.sort()
        '''If folder doesn't exist, create one'''
        if not os.path.exists(os.path.dirname(self.ptsf)):
            os.makedirs(os.path.dirname(self.ptsf))
        '''We will get the website HTML elements using beautifulsoup library'''
        r = requests.get(url_to_download)
        data = r.text
        soup = BeautifulSoup(data, features="lxml")
        list_of_href_year = []
        for i in range(2):
            if i == 0:
                '''Lets get all href available on this particular page. The first page is the year page'''
                for link0 in soup.find_all('a'):
                    lien0 = link0.get('href')
                    '''We will check if the length is 4 which corresponds to a year'''
                    if len(lien0) == 4:
                        list_of_href_year.append(url_to_download + lien0 + '/')
            elif i == 1:
                list_of_href_months = []
                list_of_href_pageviews = []
                for loh in list_of_href_year:
                    r1 = requests.get(loh)
                    data1 = r1.text
                    '''Get the webpage HTML Tags'''
                    soup1 = BeautifulSoup(data1, features="lxml")
                    for link1 in soup1.find_all('a'):
                        lien1 = link1.get('href')
                        '''We will check if the length is 7 which corresponds to the year and month'''
                        if len(lien1) == 7:
                            list_of_href_months.append(loh + lien1 + '/')
                for lohm in list_of_href_months:
                    r2 = requests.get(lohm)
                    data2 = r2.text
                    '''Get the webpage HTML Tags'''
                    soup2 = BeautifulSoup(data2, features="lxml")
                    for link2 in soup2.find_all('a'):
                        lien2 = link2.get('href')
                        '''We will now get all href that contains pagecounts in their name. We will have the files based on Time per hour. So 24 hrs is 24 files
                        and per year is 24*365=8760 files in minimum'''
                        if "pagecounts" in lien2:
                            list_of_href_pageviews.append(lohm + lien2)
        existing_file_list = []
        for file in os.listdir(self.ptsf):
            filename = os.fsdecode(file)
            existing_file_list.append(filename)
        '''Filter the links'''
        matching_fnmatch_list = []
        if filter_files != '':
            for dfilter in download_filter:
                fnmatch_list = fnmatch.filter(list_of_href_pageviews, dfilter)
                i = 0
                for fnl in fnmatch_list:
                    '''Break for demo purpose only'''
                    if self.limit_record != 0:
                        if (i == self.limit_record) and (i != 0):
                            break
                    i += 1
                    matching_fnmatch_list.append(fnl)
        '''If the user stated a filter, we will try to remove the files which are outside that filter in the list'''
        to_remove = []
        for efl in existing_file_list:
            for mloh in matching_fnmatch_list:
                if efl in mloh:
                    to_remove.append(mloh)
        '''Lets remove the files which has been found outside the filter'''
        for tr in to_remove:
            matching_fnmatch_list.remove(tr)
        matching_fnmatch_list.sort()
        '''Multi Threading of 200'''
        p = ThreadPool(200)
        p.map(self.multithread_download_files_func, matching_fnmatch_list)
    except Exception as e:
        print('funct download_files' + str(e))
From the accepted answer, I understood that it simply comes down to replacing from multiprocessing import Pool with from multiprocessing.dummy import Pool.
This worked for me.
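For context, a minimal sketch of that swap (the worker below is a stand-in, not the real download function): multiprocessing.dummy exposes the same Pool API but backs it with threads, so results are passed around in-process instead of being pickled.

from multiprocessing.dummy import Pool as ThreadPool  # thread-backed Pool with the same API

def download_it(url):
    # Stand-in worker; the real one fetches the URL and writes the file to disk.
    return url

if __name__ == '__main__':
    urls = ['https://dumps.wikimedia.org/other/pagecounts-raw/']
    with ThreadPool(4) as pool:
        print(pool.map(download_it, urls))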
The following code replaces the integer 0 with False and NULL with None:
with open(tablename, "r") as table:
    tname = table.read().splitlines()

sqlConnection = pymssql.connect(host_LZ, username_LZ, password_LZ, src_database)
cursor = sqlConnection.cursor()

for t in tname:
    sqlst = "Select * from %s" % t
    cursor.execute(sqlst)
    data = cursor.fetchall()
    print("data : ", data)