WSGI - importing user-made classes

The setup is as follows:
Apache with WSGI is successfully set up and running a bare-bones application:
import sys

# Path of the directory where scripts directory is located.
sys.path.insert(0, 'C:\\Users\\xxx\\Documents\\Programming\\www\\scripts')
from Blog import Blog  # Blog.py is a file in the scripts directory

def application(environ, start_response):
    status = '200 OK'
    output = ''
    b = Blog()
    for key in environ.keys():
        output = output + str(key) + ' : ' + str(environ[key]) + '<br/>'
    output = "Test: " + b.title + "<br/>" + b.text + "<br/>" + output
    output = b.get_blog() + "<br/>" + output
    response_headers = [('Content-type', 'text/html'),
                        ('Content-Length', str(len(output)))]
    start_response(status, response_headers)
    return [output]
The relevant parts of Blog.py look like this:
class Blog:
    # blogid is a str
    def get_blog(self):
        conn = sqlite3.connect(self.database)
        c = conn.cursor()
        # get the blog
        c.execute('select * from blogs')
        results = []
        for row in c:
            results.append(row)
        c.close()
        return results
Apache error log gives me:
line 21, in application
output = b.get_blog() + "<br/>" + output
AttributeError: Blog instance has no attribute 'get_blog'
Changing b.get_blog to str(dir(b)) gives me:
['__doc__', '__init__', '__module__', 'get_data', 'text', 'title']
This is an old version of the Blog class, one that I included in the wsgi file a while ago. I can't find where it is being cached, or why it isn't being overwritten by the Blog import: if I change the import to just import Blog and the instantiation to Blog.Blog(), it still gives the same dir contents.

Imported modules have a '__file__' attribute. Print it out and it will tell you which file was loaded for the module; then you can work out the conflict.
It may be a version elsewhere on the module search path, or it could be a stale .pyc file.
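As a quick check, here is a minimal debugging sketch (assuming the same Blog module as above) that prints where the module was actually loaded from:

import sys
import Blog  # import the module itself, not the class

# Shows which file Python actually loaded for the module; if this points at an
# unexpected directory, or at a leftover .pyc with no matching .py, that is the
# stale copy shadowing your current Blog.py.
print("Loaded from:", Blog.__file__)
print("Cached module object:", sys.modules.get('Blog'))

After deleting any stale .pyc and fixing the path conflict, restart Apache so the WSGI process re-imports the module.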

Related

Want to count if a file exists and store a duplicate followed by a copy number in Python

I want to make a script that monitors changes in a folder and moves files to special directories. I use the watchdog package (installed with pip):
import os
import shutil

from watchdog.events import FileSystemEventHandler

def makeUnique(path, counter):
    filename, extension = os.path.splitext(path)
    # IF FILE EXISTS, ADDS NUMBER TO THE END OF THE FILENAME
    file_split = filename.split('\\')
    new_name = "DUPLICATE_" + file_split[-1]
    new_file = file_split[-1].replace(file_split[-1], new_name)
    while os.path.exists(path):
        path = new_file + " (" + str(counter) + ")" + extension
        counter += 1
    return path

def move(dest, entry, name):
    """dest = D:\Download\ChromeSetup.exe"""
    file_exists = os.path.exists(dest + "\\" + name)
    counter = 1
    if file_exists:
        unique_name = makeUnique(entry, counter)
        dest = dest + "\\" + unique_name
        os.rename(entry, dest)
    try:
        print(f'[magenta][[/] [indian_red1 bold]{name}[/] [magenta]][/] -> [magenta][[/] [orange_red1 bold]{dest}[/] [magenta]][/] to ' + f'[dark_orange]{dest}[/]')
        shutil.move(entry, dest)
    except:
        pass

def moveByExtension(str, dest_dir, entry, name):
    if name.endswith(str):
        dest = dest_dir
        move(dest, entry, name)

dest_dir_exe = r'D:\Download\exe_files'

class MoverHandler(FileSystemEventHandler):
    def on_created(self, event):
        """when a file or directory is created"""
        with os.scandir(source_dir) as entries:
            for entry in entries:
                name = entry.name
                # app
                moveByExtension('.exe', dest_dir_exe, entry, name)
My current filesystem tree:
-Download/
-- ChromeSetup.exe (rename and move to exe_files)
-Download/exe_files/
-- ChromeSetup.exe
-- DUPLICATE_ChromeSetup (1).exe
But if ChromeSetup.exe lands in Download again, I get an error that 'DUPLICATE_ChromeSetup (1).exe' already exists, although 'DUPLICATE_ChromeSetup (2).exe' is expected.
I'm learning Python at the moment, so maybe I don't see the obvious problem.
Thank you in advance.
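For reference, the numbering shown in the tree above is usually produced by testing each candidate name in the destination folder rather than the original path; here is a minimal sketch of that idea (the helper name is hypothetical and it is not wired into the question's move() function):

import os

def make_unique_in(dest_dir, name):
    # Hypothetical helper: return a file name that does not yet exist in dest_dir,
    # appending an incrementing copy number after a DUPLICATE_ prefix.
    base, extension = os.path.splitext(name)
    candidate = name
    counter = 1
    while os.path.exists(os.path.join(dest_dir, candidate)):
        candidate = "DUPLICATE_" + base + " (" + str(counter) + ")" + extension
        counter += 1
    return candidate

With a loop like this, an existing DUPLICATE_ChromeSetup (1).exe simply pushes the counter on to (2).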

How to automate an API to change the URL every hour and append the new data to a csv

I have successfully implemented a script that uses an API to generate a unique URL, grab data from a database, and download it into a CSV. I am now attempting to automate this so that it generates the unique URL every hour and then appends the new data to the CSV file. I have no idea where to begin with the automation, but the working script is pasted below, so any help would be truly appreciated. Thank you.
import os
import sys
from datetime import datetime
from os.path import expanduser
import urllib.request

def main():
    # API parameters
    options = {}
    options["url"] = "https://airnowapi.org/aq/data/"
    options["start_date"] = "2020-01-01"
    options["start_hour_utc"] = "01"
    options["end_date"] = "2020-01-01"
    options["end_hour_utc"] = "05"
    options["parameters"] = "pm25"
    options["bbox"] = "-76,38,-72,42"
    options["data_type"] = "b"
    options["format"] = "text/csv"
    options["ext"] = "csv"
    options["api_key"] = ""  # NotIncludedforProtectionOfUniqueAPIkey

    # API request URL
    REQUEST_URL = options["url"] \
        + "?startdate=" + options["start_date"] \
        + "t" + options["start_hour_utc"] \
        + "&enddate=" + options["end_date"] \
        + "t" + options["end_hour_utc"] \
        + "&parameters=" + options["parameters"] \
        + "&bbox=" + options["bbox"] \
        + "&datatype=" + options["data_type"] \
        + "&format=" + options["format"] \
        + "&api_key=" + options["api_key"]

    try:
        # Request AirNowAPI data
        print("Requesting AirNowAPI data...")
        print(REQUEST_URL)

        # User's home directory.
        home_dir = expanduser("E:\SPRING2021\AIRNOWAPI\AIRNOWFILES")
        download_file_name = "AirNowAPI" + datetime.now().strftime("_%Y%M%d%H%M%S." + options["ext"])
        download_file = os.path.join(home_dir, download_file_name)

        # Perform the AirNow API data request
        api_data = urllib.request.URLopener()
        api_data.retrieve(REQUEST_URL, download_file)

        # Download complete
        print("Download URL: %s" % REQUEST_URL)
        print("Download File: %s" % download_file)
    except Exception as e:
        print("Unable perform AirNowAPI request. %s" % e)
        sys.exit(1)

if __name__ == "__main__":
    main()
I find most of your code well documented. There are many ways to automate your task; here are the steps I would recommend:
1. Create a config file.
2. Try to separate your code from the config (all the options data). You could even pickle it.
3. Make your code executable from the command line, e.g. python main.py config.yml, where you pass in the config file (a sketch of this follows below).
4. Checkpoint: at this point your code is in one or more files and the config is in another file.
5. Use a cron job or any other scheduler to trigger step 3 (e.g. every hour).
6. Shared variables/data: if you have values that need to be passed from one execution to the next, dump them to a small static file and read it back on the next iteration.
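A minimal sketch of steps 3 and 5, assuming PyYAML is installed and that config.yml holds the same keys as the options dictionary in the question (the file names and paths here are only placeholders):

# main.py -- hypothetical entry point that reads the options from a YAML config file.
# Assumes: `pip install pyyaml` and a config.yml containing the keys used in `options` above.
import sys
import yaml

def main(config_path):
    with open(config_path) as f:
        options = yaml.safe_load(f)  # e.g. {"url": ..., "start_date": ..., "api_key": ...}
    # ...build REQUEST_URL from options and download the CSV exactly as in the
    # original script, then append the new rows to your master CSV file...

if __name__ == "__main__":
    main(sys.argv[1])

On Linux, a crontab entry such as 0 * * * * /usr/bin/python3 /path/to/main.py /path/to/config.yml runs this at the top of every hour; on Windows, Task Scheduler can call the same command.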

Python Program error - The process cannot access the file because it is being used by another process

I am trying to test Python code that moves a file from a source path to a target path. The test is done using pytest in Python 3, but I am facing a roadblock: at the end of the test I try to remove the source and target paths with shutil.rmtree(path) or os.rmdir(path), and this raises the error "[WinError 32] The process cannot access the file because it is being used by another process". Please help me with this. Below is the pytest code:
import pytest
import os
import shutil
import tempfile
from sample_test_module import TestCondition

object_test_condition = TestCondition()

@pytest.mark.parametrize("test_value", ['0'])
def test_condition_pass(test_value):
    temp_dir = tempfile.mkdtemp()
    temp_src_folder = 'ABC_File'
    temp_src_dir = os.path.join(temp_dir, temp_src_folder)
    temp_file_name = 'Sample_Test.txt'
    temp_file_path = os.path.join(temp_src_dir, temp_file_name)
    os.chdir(temp_dir)
    os.mkdir(temp_src_folder)
    try:
        with open(temp_file_path, "w") as tmp:
            tmp.write("Hello-World\n")
            tmp.write("Hi-All\n")
    except IOError:
        print("Error has occurred, please check it.")
    org_val = object_test_condition.sample_test(temp_dir)
    print("Temp file path is : " + temp_file_path)
    print("Temp Dir is : " + temp_dir)
    shutil.rmtree(temp_dir)
    print("The respective dir path is now removed.")
    assert org_val == test_value
Upon execution of the code, the below error pops up:
[WinError 32] The process cannot access the file because it is being used by another process: 'C:\Users\xyz\AppData\Local\Temp\tmptryggg56'
You are getting this error because the directory you are trying to remove is the current directory of the process. If you save the current directory before calling os.chdir (using os.getcwd()), and chdir back to that directory before removing temp_dir, it should work.
Your code isn't correctly indented, so here is my best guess at what it should look like.
import pytest
import os
import shutil
import tempfile
from sample_test_module import TestCondition

object_test_condition = TestCondition()

@pytest.mark.parametrize("test_value", ['0'])
def test_condition_pass(test_value):
    temp_dir = tempfile.mkdtemp()
    temp_src_folder = 'ABC_File'
    temp_src_dir = os.path.join(temp_dir, temp_src_folder)
    temp_file_name = 'Sample_Test.txt'
    temp_file_path = os.path.join(temp_src_dir, temp_file_name)
    prev_dir = os.getcwd()
    os.chdir(temp_dir)
    os.mkdir(temp_src_folder)
    try:
        with open(temp_file_path, "w") as tmp:
            tmp.write("Hello-World\n")
            tmp.write("Hi-All\n")
    except IOError:
        print("Error has occurred, please check it.")
    org_val = object_test_condition.sample_test(temp_dir)
    print("Temp file path is : " + temp_file_path)
    print("Temp Dir is : " + temp_dir)
    os.chdir(prev_dir)
    shutil.rmtree(temp_dir)
    print("The respective dir path is now removed.")
    assert org_val == test_value
Can you try to close the temp file before removing it?
temp.close()

RETR downloading zip File from ftp not writing

I am trying to download a huge zip file (~9 GB zipped and ~130 GB unzipped) from an FTP server with Python using the ftplib library. Unfortunately, when using the retrbinary method, it does create the file in my local directory but nothing is written into it, and after the code runs for a while I get a timeout error. It used to work fine before, but when I tried to go deeper into the use of sockets with this code, it stopped working. Since the files I am trying to download are huge, I want more control over the connection to prevent timeout errors while downloading. I am not very familiar with sockets, so I may have misused them. I have been searching online but did not find any problems like this. (I tried with smaller files too as a test, but I still have the same issues.)
Here are the functions I tried; both have problems (method 1 does not write to the file, method 2 downloads the file but I can't unzip it):
import time
import socket
import ftplib
import threading
from zipfile import ZipFile

# To complete
filename = ''
local_folder = ''
ftp_folder = ''
host = ''
user = ''
mp = ''

# timeout error in method 1
def downloadFile_method_1(filename, local_folder, ftp_folder, host, user, mp):
    try:
        ftp = ftplib.FTP(host, user, mp, timeout=1600)
        ftp.set_debuglevel(2)
    except ftplib.error_perm as error:
        print(error)
    with open(local_folder + '/' + filename, "wb") as f:
        ftp.retrbinary("RETR" + ftp_folder + '/' + filename, f.write)

# method 2 works to download the zip file, but header error when unzipping it
def downloadFile_method_2(filename, local_folder, ftp_folder, host, user, mp):
    try:
        ftp = ftplib.FTP(host, user, mp, timeout=1600)
        ftp.set_debuglevel(2)
        sock = ftp.transfercmd('RETR ' + ftp_folder + '/' + filename)
    except ftplib.error_perm as error:
        print(error)

    def background():
        f = open(local_folder + '/' + filename, 'wb')
        while True:
            block = sock.recv(1024*1024)
            if not block:
                break
            f.write(block)
        sock.close()

    t = threading.Thread(target=background)
    t.start()
    while t.is_alive():
        t.join(60)
        ftp.voidcmd('NOOP')

def unzip_file(filename, local_folder):
    local_filename = local_folder + '/' + filename
    with ZipFile(local_filename, 'r') as zipObj:
        zipObj.extractall(local_folder)
And the error I get for method 1:
ftplib.error_temp: 421 Timeout - try typing a little faster next time
And the error I get when I try to unzip after using method 2:
zipfile.BadZipFile: Bad magic number for file header
Also, regarding the code below, it would be helpful if anyone could explain what these setsockopt calls do:
ftp.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
Thanks for your help.
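For what it is worth, those three calls configure TCP keepalive on the FTP control socket; here is a commented sketch of what each option does (the host is a placeholder, the timing values are the ones from the snippet, and TCP_KEEPIDLE/TCP_KEEPINTVL are Linux-specific constants, so this is an illustration rather than a portable recipe):

import socket
import ftplib

ftp = ftplib.FTP('ftp.example.com')  # placeholder host, for illustration only

# Ask the OS to send periodic keepalive packets on the idle control connection,
# which makes it less likely to be dropped while a long data transfer runs.
ftp.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

# Seconds of inactivity before the first keepalive probe is sent (Linux).
ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)

# Seconds between subsequent keepalive probes (Linux).
ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)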

How to create the correct folder directory to output files from a script assertion

I want to know the correct way of setting up a folder directory within SOAPUI. Should I use setup scripts at the test-case or test-suite level, or should the folders be set up within a Groovy script step whenever required?
Currently I have decided to use the Groovy script method, only because if I use a setup script I have to run the setup script first to get the folder directory before I can run the test case that contains a script assertion.
Below is an example of my folder directory set up in a Groovy script (called "test script"):
def date = new Date()
def folderTime = date.format("yyyy-MM-dd HH-mm-ss")
//Create a folder directory for the responses
RootResultFolder = dataFolder + "\\Log Smoke Test Data" + "\\xxx" + "\\xxx - " + folderTime + "\\"
CreateResultFolder = new File(RootResultFolder)
CreateResultFolder.mkdir()
...
context.setProperty( "RootResultFolder", RootResultFolder )
Below is my script assertion within a test step that uses the above folder directory:
def date = new Date().format("yyyy-MM-dd")
def time = new Date().format("HH.mm.ss")
def dataFolder = context.getProperty("RootResultFolder")
def fileName = xxx+ ".txt"
def rootFolder = dataFolder + fileName
def logFile = new File(rootFolder)
logFile.write "TEXT: " + xxx + "\n\n" +
JsonOutput.prettyPrint
Thank you
I suggest you place them relative to the project, using the following code:
def groovyUtils = new com.eviware.soapui.support.GroovyUtils(context)
// define location relative to SOAPUI project.
String projectPath = groovyUtils.projectPath + "/destination/"
context.setProperty( "RootResultFolder", projectPath)
