search function return null odoo 8 - search

Can't search using variable
#api.multi
def send_email(self,invoice_id):
invoice_data = self.env['account.invoice'].browse(invoice_id)
email_template_obj = self.env['email.template']
template_id = self.env.ref('multi_db.email_template_subscription_invoice', False)
report_id = self.env.ref('account.account_invoices', False)
print'invoice_id',str(invoice_data.id) #Here prints invoice_id
attach_obj = self.pool.get('ir.attachment')
attachment_id = self.env['ir.attachment'].search([('res_id','=',invoice_data.id),('res_model','=','account.invoice')])
print'attachment_id1234',attachment_id
if template_id:
values = email_template_obj.generate_email(template_id.id,invoice_id)
values['subject'] = 'Invoice for AMS registration'
values['email_to'] = invoice_data.partner_id.email
values['partner_to'] = invoice_data.partner_id
# values['attachment_ids'] = [(6, 0, report_id.id)]
values['attachment_ids'] = [(6, 0, [attachment_id.id])]
# print'values',values
mail_obj = self.env['mail.mail']
msg_id = mail_obj.create(values)
if msg_id:
mail_obj.send([msg_id])
return True
It returns ir.attachment()
But
If i hard code that value it will return id:
attachment_id = self.env['ir.attachment'].search([('res_id','=',60),('res_model','=','account.invoice')])
It returns ir.attachment(53).
How can i use variable instead of a static value?

Related

How to find _ transferSize in har file exported using browsermob-proxy in python

I am trying to export .har file using firefox-selenium-browsermob-proxy-python. Using the below code.
bmp_loc = "/Users/project/browsermob-proxy-2.1.4/bin/browsermob-proxy"
server = Server(bmp_loc)
server.start()
proxy = server.create_proxy(params={'trustAllServers': 'true'})
selenium_proxy = proxy.selenium_proxy()
caps = webdriver.DesiredCapabilities.FIREFOX
caps['marionette'] = False
proxy_settings = {
"proxyType": "MANUAL",
"httpProxy": selenium_proxy.httpProxy,
"sslProxy": selenium_proxy.sslProxy,
}
caps['proxy'] = proxy_settings
driver = webdriver.Firefox(desired_capabilities=caps)
proxy.new_har("generated_har",options={'captureHeaders': True})
driver.get("someurl")
browser_logs = proxy.har
I am interested to get _transferSize in the .har file to perform some analysis but unable to get that, instead I am getting that as 'comment':
"redirectURL": "", "headersSize": 1023, "bodySize": 38, "comment": ""
whereas manually downloading the .har file using firefox I am getting _transferSize
Version used:
browsermob_proxy==2.1.4
selenium==4.0.0
Can anybody please help me to resolve this?
You can get _transferSize by adding headersSize and bodySize from the har file itself.
urls = ["https://google.com"]
for ur in urls:
server = proxy.start_server()
client = proxy.start_client()
client.new_har("demo.com")
# print(client.proxy)
options = webdriver.ChromeOptions()
options.add_argument("--disk-cache-size=0")
options = {
'enable_har': True
}
driver = webdriver.Chrome(seleniumwire_options=options)
driver.request_interceptor = proxy.interceptor
driver.get(ur)
time.sleep(40)
row_list = []
json_dictionary = json.loads(driver.har)
repeat_url_list = []
repeat_urls = defaultdict(lambda:[])
resp_size = 0
count_url = 0
url_time = 0
status_list = []
status_url = defaultdict(lambda:[])
a_list = []
with open("network_log2.har", "w", encoding="utf-8") as f:
# f.write(json.dumps(driver.har))
for i in json_dictionary['log']['entries']:
f.write(str(i))
f.write("\n")
url = i['request']['url']
a_list.append(url)
timing = i['time']
if timing>2000:
timing = round(timing/2000,1)
url_time += 1
status = i['response']['status']
if status in status_list:
status_url[status] = status_url[status] + 1
else:
status_url[status] = 1
status_list.append(status)
size = i['response']['headersSize'] + i['response']['bodySize']
if size//1000 > 500:
resp_size += 1
if url in repeat_url_list:
repeat_urls[url] = 1
else:
repeat_url_list.append(url)
rurl_count = len(repeat_urls)

Initialize Model Class Variable At Runtime

I am trying to import student data from an Excel workbook. I have to select column_name of the class StudentMasterResource dynamically which is present in the file. I got all column name in constants module has one dictionary which name column_name. When I do it for the first time, it works, then it fails
constants.py
column_name = dict()
resource.py
from common_account import constants
from import_export import widgets, fields, resources
def getClassName(key):
if key in constants.column_name:
return constants.column_name[key]
return key
class StudentMasterResource(resources.ModelResource):
organisation_id = fields.Field(
column_name=getClassName('organisation_id'),
attribute='organisation_id',
widget=widgets.ForeignKeyWidget(OrganisationMaster, 'organisation_name'),
saves_null_values=True
)
name = fields.Field(
column_name=getClassName('Name'),
attribute='name',
saves_null_values=True,
widget=widgets.CharWidget()
)
date_of_birth = fields.Field(
column_name=getClassName('date'),
attribute='date_of_birth',
saves_null_values=True,
widget=widgets.DateWidget()
)
views.py
from common_account import constants
from tablib import Dataset
#api_view(['POST'])
#permission_classes([IsAuthenticated])
def student_import(request):
if request.method == 'POST':
context_data = dict()
data_set = Dataset()
file = request.FILES['myfile']
extension = file.name.split(".")[-1].lower()
column_data = request.data
is_import = column_name['is_import']
constants.valid_data.clear()
constants.invalid_data.clear()
if extension == 'csv':
data = data_set.load(file.read().decode('utf-8'), format=extension)
else:
data = data_set.load(file.read(), format=extension)
constants.column_name = {
'date' : column_data.get('birth'),
'name' : column_data.get('name'),
}
if is_import == 'No':
result = student_resource.import_data(data_set, organisation_id = request.user.organisation_id,
offering_id = offering_id,all_invalid_data = False, dry_run=True, raise_errors=True)
context_data['valid_data'] = constants.valid_data
context_data['invalid_data'] = constants.invalid_data
context_data[constants.RESPONSE_RESULT] = {"Total records":student_resource.total_cnt,
"skip records":len(constants.invalid_data),
"Records imported": len(constants.valid_data),
}
return JsonResponse(context_data)
elif is_import == 'Yes':
result = student_resource.import_data(data_set, organisation_id = request.user.organisation_id,
offering_id = offering_id,all_invalid_data = False, dry_run=False, raise_errors=False)
context_data[constants.RESPONSE_ERROR] = False
context_data[constants.RESPONSE_MESSAGE] = 'Data Imported !!!'
context_data[constants.RESPONSE_RESULT] = {"Total records":student_resource.total_cnt,
"skip records":len(constants.invalid_data),
"Records imported": len(constants.valid_data),
}
return JsonResponse(context_data)

Pytest unittest function doesn't return any value

Could you please help me understand issue with below unittest?
Here's my function for which i am writing unittest.
def running_config_from_database(device):
try:
data = databaseproxy(cluster='https://xxx.xxxx.xxx.net')
datadb = 'test'
query = f'''fGetrunningconfigData('{device}')
'''
raw_data = data.execute_query(datadb, query)
# pdb.set_trace()
for items in raw_data.fetchall():
config = items['Config'].split('\r\n')
for index, line in enumerate(config):
if '$PASS$' in line:
if line.startswith('set groups ospf_test'):
config_line = line.replace('$PASS$', get_auth('ospf'))
config[index] = config_line
elif line.startswith('set groups rip_test'):
config_line = line.replace('$PASS$', get_auth('rsvp'))
config[index] = config_line
config = config + overload_config
return True, '\r\n'.join(config)
except Exception as e:
return False, f'Failed to get the running config from database, error: {e}'
Here's my unittest for this function:
#patch("scripts.test.overload_config")
#patch("scripts.test.get_auth")
#patch("scripts.test.databaseproxy.execute_query")
def test_running_config_from_database(self, mock_data, mock_cred, mock_overload):
ret = MagicMock()
ret.fetchall.return_value = [{'Hostname': 'devA', 'Config': 'set groups ospf_test secret $PASS$\r\n'}]
mock_data.return_value = ret
mock_cred.return_value = 'xyz'
mock_overload = ['sample_overload_config1', 'sample_overload_config2']
expected = ['set groups ospf_test secret xyz', '']
out = expected + mock_overload
data = '\r\n'.join(out)
status, out1 = tests.test_scripts.running_config_from_database('devA')
assert status and out1 == data
When I run this unittest to test the function, I get the below assertion error - looks like the function doesn't return any value.
#patch("scripts.test.overload_config")
#patch("scripts.test.get_auth")
#patch("scripts.test.databaseproxy.execute_query")
def test_running_config_from_database(self, mock_data, mock_cred, mock_overload):
ret = MagicMock()
ret.fetchall.return_value = [{'Hostname': 'devA', 'Config': 'set groups ospf_test secret $PASS$\r\n'}]
mock_data.return_value = ret
mock_cred.return_value = 'xyz'
mock_overload = ['sample_overload_config1', 'sample_overload_config2']
expected = ['set groups ospf_test secret xyz', '']
out = expected + mock_overload
data = '\r\n'.join(out)
status, out1 = tests.test_scripts.running_config_from_database('devA')
> assert status and out1 == data
E AssertionError: assert (True and '' == 'set groups d...rload_config2'
E + set groups ospf_test secret xyz
E +
E + sample_overload_config1
E + sample_overload_config2)
tests/test_scripts.py:80: AssertionError
I edited my function to reduce the complexity but it still doesn't work. not sure why.
Main Function:
==============
def running_config_from_database(device):
try:
pdb.set_trace()
config = running_config_database(device)
for index, line in enumerate(config):
if '$PASS$' in line:
if line.startswith('set groups ospf_test'):
config_line = line.replace('$PASS$', get_cred('ospf'))
config[index] = config_line
config = config + overload_config
return True, '\r\n'.join(config)
except Exception as e:
return False, f'Failed to get the running config from Database, error: {e}'
UnitTest Result for above Function:
=========================================================================================================== FAILURES ============================================================================================================
________________________________________________________________________________________________ test_running_config_from_database _________________________________________________________________________________________________
mock_cred = <MagicMock name='get_cred' id='140210277622336'>, mock_overload = ['sample_overload_config1', 'sample_overload_config2'], mock_running_config = <MagicMock name='running_config_database' id='140210277652128'>
#patch("test.test1.scripts.running_config_database")
#patch("test.test1.scripts.overload_config")
#patch("test.test1.scripts.get_cred")
def test_running_config_from_database(mock_cred, mock_overload, mock_running_config):
mock_running_config.return_value = ['set groups ospf_test secret $PASS$', '']
mock_cred.return_value = 'xyz'
mock_overload = ['sample_overload_config1', 'sample_overload_config2']
expected = ['set groups ospf_test secret xyz', '']
out = expected + mock_overload
data = '\r\n'.join(out)
status, out1 = test.test1.scripts.test_running_config_from_database('devA')
> assert status and out1 == data
E AssertionError: assert (True and '' == 'set groups d...rload_config2'
E + set groups ospf_test secret xyz
E +
E + sample_overload_config1
E + sample_overload_config2)
validation_tests/test_scripts.py:152: AssertionError
================================================================================================== 1 failed, 6 passed in 4.79s ==================================================================================================
The problem here is the assignment to mock_overload. If you want to adapt your mocked object you have to make sure that the object itself is changed. If you just assign another object (in this case, a list), your variable now points to the list object, while the original mock_overload is no longer referenced (and is not changed). So instead of writing:
mock_overload = ['sample_overload_config1', 'sample_overload_config2']
you can for example write
mock_overload[:] = ['sample_overload_config1', 'sample_overload_config2']
For clarification, here is a simplified version of the original code:
>>> mock_overload = []
>>> id(mock_overload)
1477793866440
>>> mock_overload = [5, 6]
>>> id(mock_overload)
1477791015560 <- changed id, no longer pointing to the mock
Now the same with the fixed code:
>>> mock_overload = []
>>> id(mock_overload)
140732764763024
>>> mock_overload[:] = [5, 6]
>>> id(mock_overload)
140732764763024 <- unchanged id, still points to the mock
Note that mock_overload[:] = [5, 6] is basically a shortcut for:
mock_object.clear()
mock_object.extend([5, 6])
Answer is already provided in comment section by #MrBean Bremen. here's the UT after making changes suggested.
#patch("scripts.test.overload_config")
#patch("scripts.test.get_auth")
#patch("scripts.test.databaseproxy.execute_query")
def test_running_config_from_database(self, mock_data, mock_cred, mock_overload):
ret = MagicMock()
ret.fetchall.return_value = [{'Hostname': 'devA', 'Config': 'set groups ospf_test secret $PASS$\r\n'}]
mock_data.return_value = ret
mock_cred.return_value = 'xyz'
***mock_overload[:]*** = ['sample_overload_config1', 'sample_overload_config2']
expected = ['set groups ospf_test secret xyz', '']
out = expected + mock_overload
data = '\r\n'.join(out)
status, out1 = tests.test_scripts.running_config_from_database('devA')
assert status and out1 == data

How to have bolded headers, but non-bolded cells - filled with worksheet.append_table of pygsheets==2.0.1

The python3 (pygsheets 2.0.1) script below will bold all the cells starting at A2.
Is there an easy way (i.e., in one command) to ask for all these cells not to be bolded?
Code:
import boto3, botocore
import datetime
import json
import pygsheets
currentDT = str(datetime.datetime.now())
def create_spreadsheet(outh_file, spreadsheet_name = "jSonar AWS usage"):
client = pygsheets.authorize(outh_file=outh_file, outh_nonlocal=True)
spread_sheet = client.create(spreadsheet_name)
return spread_sheet
def get_regions():
region = "us-west-1"
regions = dict()
ec2 = boto3.client("ec2", region_name=region)
ec2_responses = ec2.describe_regions()
ssm_client = boto3.client('ssm', region_name=region)
for resp in ec2_responses['Regions']:
region_id = resp['RegionName']
tmp = '/aws/service/global-infrastructure/regions/%s/longName' % region_id
ssm_response = ssm_client.get_parameter(Name = tmp)
region_name = ssm_response['Parameter']['Value']
regions[region_id] = region_name
return(regions)
def rds_worksheet_creation(spread_sheet, regions, spreadsheet_index):
worksheet = spread_sheet.add_worksheet("RDS", rows=100, cols=26, src_tuple=None, src_worksheet=None, index=spreadsheet_index)
worksheet.cell('A1').set_text_format('bold', True).value = 'DBInstanceIdentifier'
worksheet.cell('B1').set_text_format('bold', True).value = 'MasterUsername'
worksheet.cell('C1').set_text_format('bold', True).value = 'Region'
worksheet.cell('D1').set_text_format('bold', False).value = 'Sent Query to (Name)'
worksheet.cell('E1').set_text_format('bold', False).value = 'Sent Query to (email)'
worksheet.cell('F1').set_text_format('bold', False).value = 'WorksheetCreated: %s' % currentDT
cells_data = list()
for region, region_h in sorted(regions.items()):
client = boto3.client('rds', region_name=region)
clnt = boto3.client('ssm', region_name=region)
db_instances = client.describe_db_instances()
for instance in db_instances['DBInstances']:
MasterUsername = instance['MasterUsername']
DBInstanceIdentifier = instance['DBInstanceIdentifier']
cells_data.append([DBInstanceIdentifier, MasterUsername, region_h])
worksheet.append_table(cells_data, start='A2')
if __name__ == "__main__":
spread_sheet = create_spreadsheet(spreadsheet_name = "jSonar AWS usage",
outh_file = '/home/qa/.aws/client_secret.json')
regions = get_regions()
rds_worksheet_creation(spread_sheet, regions, 0)
spread_sheet.share("me#corp.com")
Output:
If i understand correctly you want to un-bold multiple cells in single command.
To set format to a range of cells create a Datarange and use apply_format.
model_cell = Cell('A1')
model_cell.set_text_format('bold', False)
Datarange('A1','A10', worksheet=wks).apply_format(model_cell)
docs

Python return all items from loop?

I cannot figure how to return all the items using this code:
#staticmethod
def create_dataset():
cols = Colleagues.get_all_colleagues()
cols_abs = ((col['Firstname'] + " " + col['Surname'], col['Absences']) for col in cols)
for col in cols_abs:
dataset = list()
sum_days = list()
for d in col[1]:
start_date = d[0]
end_date = d[1]
s = datetime.strptime(start_date, "%Y-%m-%d")
e = datetime.strptime(end_date, "%Y-%m-%d")
startdate = s.strftime("%b-%y")
days = numpy.busday_count(s, e) + 1
sum_days.append(days)
days_per_month = startdate, days
dataset.append(days_per_month)
dict_gen1 = dict(dataset)
comb_days = sum(sum_days)
dict_gen2 = {'Name': col[0], 'Spells': len(col[1]), 'Total(Days)': comb_days}
dict_comb = [{**dict_gen1, **dict_gen2}]
return dict_comb
It only returns the first "col". If I move the return statement outside of the loop it returns only the last item in my set of data. This is the output that is returned from col_abs:
('Jonny Briggs', [['2015-08-01', '2015-08-05'], ['2015-11-02', '2015-11-06'], ['2016-01-06', '2016-01-08'], ['2016-03-07', '2016-03-11']])
('Matt Monroe[['2015-12-08', '2015-12-11'], ['2016-05-23', '2016-05-26']])
('Marcia Jones', [['2016-02-02', '2016-02-04']])
('Pat Collins', [])
('Sofia Marowzich', [['2015-10-21', '2015-10-30'], ['2016-03-09', '2016-03-24']])
('Mickey Quinn', [['2016-06-06', '2016-06-08'], ['2016-01-18', '2016-01-21'], ['2016-07-21', '2016-07-22']])
('Jenifer Andersson', [])
('Jon Fletcher', [])
('James Gray', [['2016-04-01', '2016-04-06'], ['2016-07-04', '2016-07-07']])
('Matt Chambers', [['2016-05-02', '2016-05-04']])
Can anyone help me understand this better as I want to return a "dict_comb" for each entry in col_abs ?
Replace your return statement with a yield statement. This will allow your method to continue to loop while "yielding" or returning values after each iteration.

Resources