Accessing local variable within same class python - python-3.x

I tried to create a dataframe from excel files in remote desktop using Paramiko, but I failed to call setPath() to writeDataframe() and got this error
TypeError: writeDataframe() takes 1 positional argument but 2 were given
Here is my code :
class remoteConnection:
def __init__(self,hostname, username, password, path):
self.hostname = hostname
self.username = username
self.password = password
self.path = path
def connectRemote(self):
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
client.connect(hostname=self.hostname,
username=self.username,
password=self.password,
look_for_keys=False,
allow_agent=False)
sftp_handle = client.open_sftp()
sftp_handle.chdir('.' + '/' + self.path)
base_dir = sftp_handle.getcwd()
return sftp_handle, base_dir
def setPath(self, *pattern):
current_sftp_handle , current_dir = self.connectRemote()
for i in pattern :
current_sftp_handle.chdir(current_dir+'/'+str(i)+'/')
updated_dir = current_sftp_handle.getcwd()
return current_sftp_handle
def writeDataframe(self):
updated_sftp_handle = self.setPath()
filename = [file for file in updated_sftp_handle.listdir()]
appended_data = pd.DataFrame()
for file in filename:
f = updated_dir.open(file)
temp_df = pd.read_excel(f)
temp_df['Filename'] = file
utime = f.stat().st_mtime
temp_df['Date modified'] = datetime.fromtimestamp(utime)
appended_data = appended_data.append(temp_df)
return appended_data
I called my function like this:
hostname='xxx.xxx.x.xxx'
username='username'
password1='XXXXXXXXXXXX'
path1 = '/Documents/datasets'
initClass = remoteConnection(hostname, username, password1, path1)
connection = initClass.connectRemote()
a = ['SOURCE A', 'SOURCE B']
for i in a:
my_obj = initClass.setPath(i)
print(my_obj)
dg = initClass.writeDataframe(my_obj)
Has anyone got any advice? Thanks

Obviously you forgot to declare the argument or are passing an unneded argument:
Oprion 1: declare the argument:
def writeDataframe(self, arg):
Option 2: call the method without the arg:
dg = initClass.writeDataframe()

Related

How to write a python function that creates .ini file for unknown inputs?

I used configparser in python as below and created the .ini file. But, here all the inputs are predefined. Is there a way to create a .ini file where inputs are user driven?
import configparser
def create_ini_file():
config = configparser.ConfigParser()
config['DEFAULT'] = {'Designation': 'FullStackDeveloper',
'ProjectName': 'Github/SourceTree',
'Staysatacademy': 'no'
}
config['Avinash Srivastava'] = {}
Member1 = config['Avinash Srivastava']
Member1['gender'] = 'Male'
Member1['ID'] = 'HQ110'
Member1['Experience'] = '2 yrs'
Member1['Staysatacademy'] = 'yes'
config['Neha Sharma'] = {}
Member2 = config['Neha Sharma']
Member2['gender'] = 'Female'
Member2['ID'] = 'HQ111'
Member2['Experience'] = '1 yr'
with open(r'D:\Dev\repo\t1\docs\team1.ini', 'w') as file:
config.write(file)
create_ini_file()

How can I delete via python all the s3 "delete marker" placeholder filtered by "LastModified " (for ex. just from yesterday)?

I modified some scripts to delete all the "delete marker" placeholders from some bucket in s3 but I never found/developed something where I can delete the "delete marker" filtered by datetime.
My scope is to create a script to run after a "delete_object_job" failure, so I can run a new script where set the datatime of the failure and delete all the "delete-marker" just from that datetime.
Actually from this code I can delete all the "data marker" from some buckets but without a datetime filter:
#!/usr/bin/env python
import json
from datetime import datetime
from boto3 import Session
BUCKET_NAME = "germano"
prefix = ''
session = Session(region_name='eu-south-1', profile_name='default')
bucket = session.client('s3')
MAX_KEYS = 10000
def get_bucket_versions(version_id, key):
return bucket.list_object_versions(Bucket=BUCKET_NAME,
MaxKeys=MAX_KEYS,
Prefix=prefix,
VersionIdMarker=version_id,
KeyMarker=key)
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
#ottengo gli attributes della classe creata DateTimeEncoder
#DateTimeEncoder = DateTimeEncoder()
#attributes_of_DateTimeEncoder = dir(DateTimeEncoder)
#print(attributes_of_DateTimeEncoder)
def objects_to_restore(versions):
return [
{
'VersionId': marker['VersionId'],
'Key': marker['Key']
} for marker in versions.get('DeleteMarkers') if marker['IsLatest']
]
def restore_s3_objects(version_markers, count):
markers_to_delete = objects_to_restore(version_markers)
print(f"Will restore {len(markers_to_delete)} items during request number: {count}")
if not markers_to_delete:
return 0
bucket.delete_objects(Bucket=BUCKET_NAME, Delete={'Objects': markers_to_delete})
return len(markers_to_delete)
obj_list = bucket.list_object_versions(Bucket=BUCKET_NAME,
MaxKeys=MAX_KEYS,
Prefix=prefix)
_next_version_id = obj_list.get('NextVersionIdMarker')
_next_key_marker = obj_list.get('NextKeyMarker')
counter = 1
total_restored = restore_s3_objects(obj_list, counter)
while _next_version_id and _next_key_marker:
counter += 1
another_list_of_versions = get_bucket_versions(_next_version_id, _next_key_marker)
_next_version_id = another_list_of_versions.get('NextVersionIdMarker')
_next_key_marker = another_list_of_versions.get('NextKeyMarker')
total_restored += restore_s3_objects(another_list_of_versions, counter)
print(f"Total Restored: {total_restored}")
i solved just modifing a bit the function "objects_to_restore":
def objects_to_restore(versions, last_modified_timestamp="2022-04-28 09:19:56.986445+00:00"):
print (versions.get('DeleteMarkers'))
#print (versions.get('Versions'))
return [
{
'VersionId': marker['VersionId'],
'Key': marker['Key'],
} for marker in versions.get('DeleteMarkers')
if marker['IsLatest']
if str(marker["LastModified"]) >= str(last_modified_timestamp)

Write file name based on return

I'm creating a boto3 script that scrapes and uploads our entire accounts Public Ips and NatGateway Ips to our S3 bucket. I'm stuck on writing files for both returns. I would ideally like to write two separate files while still using the same filename variable you see in main(). Right now I can get this to work with only one return(either nat_ips or public_ips)
import boto3
from datetime import datetime
from csv import writer
def get_ips():
# Uses STS to assume the role needed.
boto_sts=boto3.client('sts')
sts_response = boto_sts.assume_role(
RoleArn='arn:aws:iam::1234:role/foo',
RoleSessionName='Foo'
)
# Save the details from assumed role into vars
sts_credentials = sts_response["Credentials"]
session_id = sts_credentials["AccessKeyId"]
session_key = sts_credentials["SecretAccessKey"]
session_token = sts_credentials["SessionToken"]
# List and store all the regions
ec2_client=boto3.client('ec2',aws_access_key_id=session_id,aws_secret_access_key=session_key,aws_session_token=session_token,region_name='us-west-1')
all_regions=[region['RegionName'] for region in ec2_client.describe_regions()['Regions']]
nat_ips = []
public_ips = []
for region in all_regions:
max_results = 1000
next_token = ''
ec2_client=boto3.client('ec2',aws_access_key_id=session_id,aws_secret_access_key=session_key,aws_session_token=session_token,region_name=region)
session=boto3.Session(aws_access_key_id=session_id, aws_secret_access_key=session_key, aws_session_token=session_token, region_name=region)
while next_token or next_token == '':
response = ec2_client.describe_nat_gateways(MaxResults=max_results, NextToken=next_token)
filters = [{'Name':'tag:Name', 'Values':['*sgw-eip']}]
get_ips = ec2_client.describe_addresses(Filters=filters)
for gateway in response["NatGateways"]:
for address in gateway["NatGatewayAddresses"]:
nat_ips.append(address["PublicIp"]+'/32')
for eip_dict in get_ips['Addresses']:
public_ip_string = eip_dict['Tags'][0]['Value'] + ' : ' + eip_dict['PublicIp']
public_ips.append(public_ip_string)
next_token = response.get("NextToken", None)
return nat_ips, public_ips
def _s3_upload(filename):
s3 = boto3.resource('s3')
bucket = 'foo-bar'
object_name = 'foo/'
s3.meta.client.upload_file(Filename=filename,Bucket=bucket,Key=object_name+filename)
print(f'Uploading {filename} to {bucket}')
def write_list_to_file(filename, data):
lines_string = '\n'.join(str(x) for x in data)
with open(filename,'w') as output:
output.writelines(lines_string)
print(f'Writing file to {filename}')
if __name__ == "__main__":
date = datetime.now().strftime('%Y%m%d')
# Stuck here since I want to make it one variable
filename_nat_ips = f'natgateway_ips{date}.csv'
filename_sga_ips = f'sga_ips{date}.csv'
public_ips = get_ips()
nat_ips = get_ips()
print(filename)
write_list_to_file(filename, nat_ips)
_s3_upload(filename)
I see that you are already returning a tuple of public_ips and nat_ips from your get_ips() function. So in your main, you could collect them together as well.
You might try something like this:
if __name__ == "__main__":
date = datetime.now().strftime('%Y%m%d')
# Stuck here since I want to make it one variable
filename_nat_ips = f'natgateway_ips{date}.csv'
filename_sga_ips = f'sga_ips{date}.csv'
nat_ips, public_ips = get_ips()
write_list_to_file(filename_nat_ips, nat_ips)
write_list_to_file(filename_public_ips, public_ips)
_s3_upload(filename_nat_ips)
_s3_upload(filename_public_ips)
I was doing it right the first time. And was trying to make it more complicated.
if __name__ == "__main__":
date = datetime.now().strftime('%Y%m%d')
filename_nat_ips = f'natgateway_ips{date}.csv'
filename_sga_ips = f'sga_ips{date}.csv'
nat_ips, public_ips = get_ips()
print(filename_nat_ips)
print(filename_sga_ips)
write_list_to_file(filename_nat_ips, nat_ips)
write_list_to_file(filename_sga_ips, public_ips)
_s3_upload(filename_nat_ips)
_s3_upload(filename_sga_ips)

Pytest unittest function doesn't return any value

Could you please help me understand issue with below unittest?
Here's my function for which i am writing unittest.
def running_config_from_database(device):
try:
data = databaseproxy(cluster='https://xxx.xxxx.xxx.net')
datadb = 'test'
query = f'''fGetrunningconfigData('{device}')
'''
raw_data = data.execute_query(datadb, query)
# pdb.set_trace()
for items in raw_data.fetchall():
config = items['Config'].split('\r\n')
for index, line in enumerate(config):
if '$PASS$' in line:
if line.startswith('set groups ospf_test'):
config_line = line.replace('$PASS$', get_auth('ospf'))
config[index] = config_line
elif line.startswith('set groups rip_test'):
config_line = line.replace('$PASS$', get_auth('rsvp'))
config[index] = config_line
config = config + overload_config
return True, '\r\n'.join(config)
except Exception as e:
return False, f'Failed to get the running config from database, error: {e}'
Here's my unittest for this function:
#patch("scripts.test.overload_config")
#patch("scripts.test.get_auth")
#patch("scripts.test.databaseproxy.execute_query")
def test_running_config_from_database(self, mock_data, mock_cred, mock_overload):
ret = MagicMock()
ret.fetchall.return_value = [{'Hostname': 'devA', 'Config': 'set groups ospf_test secret $PASS$\r\n'}]
mock_data.return_value = ret
mock_cred.return_value = 'xyz'
mock_overload = ['sample_overload_config1', 'sample_overload_config2']
expected = ['set groups ospf_test secret xyz', '']
out = expected + mock_overload
data = '\r\n'.join(out)
status, out1 = tests.test_scripts.running_config_from_database('devA')
assert status and out1 == data
When I run this unittest to test the function, I get the below assertion error - looks like the function doesn't return any value.
#patch("scripts.test.overload_config")
#patch("scripts.test.get_auth")
#patch("scripts.test.databaseproxy.execute_query")
def test_running_config_from_database(self, mock_data, mock_cred, mock_overload):
ret = MagicMock()
ret.fetchall.return_value = [{'Hostname': 'devA', 'Config': 'set groups ospf_test secret $PASS$\r\n'}]
mock_data.return_value = ret
mock_cred.return_value = 'xyz'
mock_overload = ['sample_overload_config1', 'sample_overload_config2']
expected = ['set groups ospf_test secret xyz', '']
out = expected + mock_overload
data = '\r\n'.join(out)
status, out1 = tests.test_scripts.running_config_from_database('devA')
> assert status and out1 == data
E AssertionError: assert (True and '' == 'set groups d...rload_config2'
E + set groups ospf_test secret xyz
E +
E + sample_overload_config1
E + sample_overload_config2)
tests/test_scripts.py:80: AssertionError
I edited my function to reduce the complexity but it still doesn't work. not sure why.
Main Function:
==============
def running_config_from_database(device):
try:
pdb.set_trace()
config = running_config_database(device)
for index, line in enumerate(config):
if '$PASS$' in line:
if line.startswith('set groups ospf_test'):
config_line = line.replace('$PASS$', get_cred('ospf'))
config[index] = config_line
config = config + overload_config
return True, '\r\n'.join(config)
except Exception as e:
return False, f'Failed to get the running config from Database, error: {e}'
UnitTest Result for above Function:
=========================================================================================================== FAILURES ============================================================================================================
________________________________________________________________________________________________ test_running_config_from_database _________________________________________________________________________________________________
mock_cred = <MagicMock name='get_cred' id='140210277622336'>, mock_overload = ['sample_overload_config1', 'sample_overload_config2'], mock_running_config = <MagicMock name='running_config_database' id='140210277652128'>
#patch("test.test1.scripts.running_config_database")
#patch("test.test1.scripts.overload_config")
#patch("test.test1.scripts.get_cred")
def test_running_config_from_database(mock_cred, mock_overload, mock_running_config):
mock_running_config.return_value = ['set groups ospf_test secret $PASS$', '']
mock_cred.return_value = 'xyz'
mock_overload = ['sample_overload_config1', 'sample_overload_config2']
expected = ['set groups ospf_test secret xyz', '']
out = expected + mock_overload
data = '\r\n'.join(out)
status, out1 = test.test1.scripts.test_running_config_from_database('devA')
> assert status and out1 == data
E AssertionError: assert (True and '' == 'set groups d...rload_config2'
E + set groups ospf_test secret xyz
E +
E + sample_overload_config1
E + sample_overload_config2)
validation_tests/test_scripts.py:152: AssertionError
================================================================================================== 1 failed, 6 passed in 4.79s ==================================================================================================
The problem here is the assignment to mock_overload. If you want to adapt your mocked object you have to make sure that the object itself is changed. If you just assign another object (in this case, a list), your variable now points to the list object, while the original mock_overload is no longer referenced (and is not changed). So instead of writing:
mock_overload = ['sample_overload_config1', 'sample_overload_config2']
you can for example write
mock_overload[:] = ['sample_overload_config1', 'sample_overload_config2']
For clarification, here is a simplified version of the original code:
>>> mock_overload = []
>>> id(mock_overload)
1477793866440
>>> mock_overload = [5, 6]
>>> id(mock_overload)
1477791015560 <- changed id, no longer pointing to the mock
Now the same with the fixed code:
>>> mock_overload = []
>>> id(mock_overload)
140732764763024
>>> mock_overload[:] = [5, 6]
>>> id(mock_overload)
140732764763024 <- unchanged id, still points to the mock
Note that mock_overload[:] = [5, 6] is basically a shortcut for:
mock_object.clear()
mock_object.extend([5, 6])
Answer is already provided in comment section by #MrBean Bremen. here's the UT after making changes suggested.
#patch("scripts.test.overload_config")
#patch("scripts.test.get_auth")
#patch("scripts.test.databaseproxy.execute_query")
def test_running_config_from_database(self, mock_data, mock_cred, mock_overload):
ret = MagicMock()
ret.fetchall.return_value = [{'Hostname': 'devA', 'Config': 'set groups ospf_test secret $PASS$\r\n'}]
mock_data.return_value = ret
mock_cred.return_value = 'xyz'
***mock_overload[:]*** = ['sample_overload_config1', 'sample_overload_config2']
expected = ['set groups ospf_test secret xyz', '']
out = expected + mock_overload
data = '\r\n'.join(out)
status, out1 = tests.test_scripts.running_config_from_database('devA')
assert status and out1 == data

Dictionary with functions versus dictionary with class

I'm creating a game where i have the data imported from a database, but i have a little problem...
Currently i get a copy of the data as a dictionary, which i need to pass as argument to my GUI, however i also need to process some data, like in this example:
I get the data as a dict (I've created the UseDatabase context manager and is working):
def get_user(name: str, passwd: str):
user = {}
user['name'] = name
user['passwd'] = passwd
with UseDatabase() as cursor:
_SQL = "SELECT id, cash, ruby FROM user WHERE name='Admin' AND password='adminpass'"
cursor.execute(_SQL)
res = cursor.fetchall()
if res:
user['id'] = res[0][0]
user['cash'] = res[0][1]
user['ruby'] = res[0][2]
return user
return res
.
.
.
def get_activities():
with UseDatabase() as cursor:
_SQL = "SELECT * FROM activities WHERE user_id='2'"
cursor.execute(_SQL)
res = cursor.fetchall()
if res:
ids = [i[0] for i in res]
activities = {}
for i in res:
activities[i[0]] = {'title':i[1],'unlock':i[2],'usr_progress':i[3]}
return (ids, activities)
return res
Need it as a dict in my GUI ("content" argument):
class SideBar:
def __init__(self, screen: 'pygame.display.set_mode()', box_width: int, box_height: int, content: dict, font: 'font = pygame.font.Font()'):
#content dict: {id: {'title':'','unlock':'','usr_progress':''},...}
self.box_width = box_width
self.box_height = box_height
self.box_per_screen = screen.get_height() // box_height
self.content = content
self.current_box = 1
self.screen = screen
self.font = font
self.generate_bar()
def generate_bar (self):
active = [i for i in self.content.keys() if i in range(self.current_box, self.current_box+self.box_per_screen)]
for i in range(self.box_per_screen):
gfxdraw.box(self.screen,pygame.Rect((0,i*self.box_height),(self.screen.get_width()/3,self.screen.get_height()/3)),(249,0,0,170))
self.screen.blit(self.font.render(str(active[i]) + ' - ' + self.content[active[i]]['title'], True, (255,255,255)),(10,i*self.box_height+4))
for i in range(self.box_per_screen):
pygame.draw.rect(self.screen,(50,0,0),pygame.Rect((0,i*self.box_height),(self.screen.get_width()/3,self.screen.get_height()/3)),2)
But still need to make some changes in the data:
def unlock_act(act_id):
if user['cash'] >= activities[act_id]['unlock'] and activities[act_id]['usr_progress'] == 0:
user['cash'] -= activities[act_id]['unlock']
activities[act_id]['usr_progress'] = 1
So the question is: in this situation should i keep a copy of the data as dict, and create a class with it plus the methods i need or use functions to edit the data inside the dict?

Resources