Our Oracle database was migrated to a new server, so it has a new server name. Most of our published workbooks on Tableau Server connect to this Oracle database. The username and password remain the same, but the server address has changed. I used the following Python code. It identifies the right workbooks that need the server address update, but it produces an error: 'PW Update Failed with error:
404004: Resource Not Found
Datasource '5f125136-22da-48d0-bdc7-8e5edde8d809' could not be found.
'''
import tableauserverclient as TSC
import re
tableau_auth = TSC.TableauAuth('site_admin_username', 'site_admin_password', site_id='default') # site_id not needed if there is only one
search_server_regex = 'oldserver123' # server to search
replace_server = 'newserver123' # use if server name/address is changing - otherwise make it the same as search_server
overwrite_credentials = False # set to false to use existing credentials
search_for_certain_users = True # set to True if you only want to update connections for certain usernames
search_username = 'username'
replace_username = 'username'
replace_pw = 'password'
request_options = TSC.RequestOptions(pagesize=1000) # this needs to be > # of workbooks/data connections on the site
server = TSC.Server('http://tableau_server:8000') # tableau server
y = 0 # to keep track of how many are changed
try:
    with server.auth.sign_in(tableau_auth):
        all_workbooks, pagination_item = server.workbooks.get(req_options=request_options)
        print("Total Workbooks to Search: {}".format(len(all_workbooks)))
        for wb in all_workbooks:
            server.workbooks.populate_connections(wb)
            for item, conn in enumerate(wb.connections):  # make sure to iterate through all connections in the workbook
                if wb.connections[item].connection_type != 'sqlproxy':  # sqlproxy indicates published datasource
                    if re.search(search_server_regex, wb.connections[item].server_address, re.IGNORECASE):
                        connection = wb.connections[item]
                        if search_for_certain_users and re.search(search_username, connection.username, re.IGNORECASE):
                            # print(wb.name, '-', connection.connection_type)
                            connection.server_address = replace_server
                            connection.embed_password = False
                            if overwrite_credentials:
                                connection.embed_password = True
                                connection.username = replace_username
                                connection.password = replace_pw
                            server.datasources.update_connection(wb, connection)
                            y = y + 1
                        elif not search_for_certain_users:
                            # print(wb.name, '-', connection.connection_type)
                            connection.server_address = replace_server
                            connection.embed_password = False
                            if overwrite_credentials:
                                connection.embed_password = True
                                connection.username = replace_username
                                connection.password = replace_pw
                            server.datasources.update_connection(wb, connection)
                            y = y + 1
        print("Workbook Connections Changed: {}".format(y))
except Exception as e:
    print("PW Update Failed with error: {}".format(e))
    print("Connections Updated: {}".format(y))
'''
How can I fix the code?
You have to update the workbooks, not the datasources:
server.workbooks.update_connection(wb, connection)
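In context, here is a hedged sketch of how the loop body changes (same variable names as the question; it assumes your tableauserverclient version exposes workbooks.update_connection):
# connection belongs to the workbook wb, so update it through the
# workbooks endpoint instead of the datasources endpoint
connection.server_address = replace_server
connection.embed_password = False
if overwrite_credentials:
    connection.embed_password = True
    connection.username = replace_username
    connection.password = replace_pw
server.workbooks.update_connection(wb, connection)  # was server.datasources.update_connection(wb, connection)
y = y + 1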
I ran into the same problem and I'm hoping my solution fixes yours. I created a "helper" class that has one attribute called "id":
class datasource_id:
    def __init__(self, id):
        self.id = id
I put the class at the top of my code. Then I replaced the lines:
if overwrite_credentials:
    connection.embed_password = True
    connection.username = replace_username
    connection.password = replace_pw
server.datasources.update_connection(wb, connection)
with the code below in both places:
if overwrite_credentials:
    connection.embed_password = True
    connection.username = replace_username
    connection.password = replace_pw
d1 = datasource_id(wb.connections[item].datasource_id)
server.datasources.update_connection(d1, connection)
The reason this works is that .update_connection reads the id attribute of whatever is passed as its first argument and treats it as the datasource id. Passing wb is incorrect because wb.id is the id of the workbook, not of the datasource.
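An alternative to the helper class, if your tableauserverclient version supports it, is to fetch the actual datasource item by id and pass that instead. A hedged sketch, untested, using the same variable names as above:
if overwrite_credentials:
    connection.embed_password = True
    connection.username = replace_username
    connection.password = replace_pw
# look up the real DatasourceItem so .update_connection sees the datasource id, not the workbook id
datasource = server.datasources.get_by_id(wb.connections[item].datasource_id)
server.datasources.update_connection(datasource, connection)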
I'm a new Django programmer. I wrote an API with rest_framework in Django. When this API is called, my program connects to KuCoin and gets the list of all cryptocurrencies. I want to save the symbol and name of these cryptocurrencies in the database. To save the data, I use a for loop and in every iteration I query the database and save the data. My code:
for currencie in currencies:
    name = currencie['name']
    symbol = currencie['symbol']
    active = (False, True)[symbol.endswith('USDT')]
    oms = 'kucoin'
    try:
        obj = Instrument.objects.get(symbol=symbol, oms=oms)
        setattr(obj, 'name', name)
        setattr(obj, 'active', active)
        obj.save()
    except Instrument.DoesNotExist:
        obj = Instrument(name=name, symbol=symbol,
                         active=active, oms=oms)
        obj.save()
Querying the database on every loop iteration is a problem. How can I solve it?
Is there any way in Django to save the data to the database with a single query?
All my code:
class getKucoinInstrument(APIView):
    def post(self, request):
        try:
            person = Client.objects.filter(data_provider=True).first()
            person_data = ClientSerializer(person, many=False).data
            api_key = person_data['api_key']
            api_secret = person_data['secret_key']
            api_passphrase = person_data['api_passphrase']
            client = kucoin_client(api_key, api_secret, api_passphrase)
            currencies = client.get_symbols()
            for currencie in currencies:
                name = currencie['name']
                symbol = currencie['symbol']
                active = (False, True)[symbol.endswith('USDT')]
                oms = 'kucoin'
                try:
                    obj = Instrument.objects.get(symbol=symbol, oms=oms)
                    setattr(obj, 'name', name)
                    setattr(obj, 'active', active)
                    obj.save()
                except Instrument.DoesNotExist:
                    obj = Instrument(name=name, symbol=symbol,
                                     active=active, oms=oms)
                    obj.save()
            return Response({'response': 'Instruments get from kucoin'}, status=status.HTTP_200_OK)
        except Exception as e:
            print(e)
            return Response({'response': 'Internal server error'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
Thank you for your help.
Yes! Take a look at the bulk_create() documentation: https://docs.djangoproject.com/en/4.0/ref/models/querysets/#bulk-create
If you have a database that supports the ignore_conflicts parameter (all do, except Oracle), you can do this:
new_currencies = []
for currencie in currencies:
    name = currencie['name']
    symbol = currencie['symbol']
    active = (False, True)[symbol.endswith('USDT')]
    oms = 'kucoin'
    new_currencies.append(Instrument(name=name, symbol=symbol,
                                     active=active, oms=oms))
Instrument.objects.bulk_create(new_currencies, ignore_conflicts=True)
1-liner:
Instrument.objects.bulk_create(
[
Instrument(
name=currencie['name'], symbol=currencie['symbol'],
active=currencie['symbol'].endswith('USDT'), oms='kucoin'
)
for currencie in currencies
],
ignore_conflicts=True
)
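Note that ignore_conflicts only skips rows that already exist; it does not refresh name or active for them the way your get()/save() loop did. If you are on Django 4.1+ and your database supports it, a hedged upsert-style sketch (this assumes Instrument has a unique constraint on (symbol, oms), which is not shown in your question):
Instrument.objects.bulk_create(
    [
        Instrument(
            name=currencie['name'], symbol=currencie['symbol'],
            active=currencie['symbol'].endswith('USDT'), oms='kucoin'
        )
        for currencie in currencies
    ],
    update_conflicts=True,              # update existing rows instead of skipping them
    unique_fields=['symbol', 'oms'],    # assumed unique constraint on (symbol, oms)
    update_fields=['name', 'active'],   # fields to refresh when a row already exists
)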
I am trying to update a handful of fields if the record has been marked as deleted. When I debug, I can see that the values are being assigned in the view, but not being updated in the DB. I'm not sure what I'm missing.
views.py
contact_formset = ContactFormSet(request.POST, prefix='contact')
for contact in contact_formset.deleted_forms:
    contact = contact.cleaned_data
    print(contact)
    instance = contact['id_cnt']
    contact_id = instance.id_cnt
    contact_object = get_object_or_404(AppContactCnt, id_cnt=contact_id)
    contact_object.deleted_cnt = True
    contact_object.date_deleted_cnt = '2020-11-23'
    contact_object.deleted_by_cnt = 'adeacon'
    contact_object.save()
I included the delete code with the edit code. That seemed to do the trick:
if '_edit_contact_formset' in request.POST:
    contact_formset = ContactFormSet(request.POST, prefix='contact')
    for contact_form in contact_formset:
        if contact_form.is_valid() and contact_form.has_changed():
            contact_data = contact_form.cleaned_data
            # print(contact_data)
            customer_obj = get_object_or_404(AppCustomerCst, id_cst=id)
            contact_form.instance.idcst_cnt = customer_obj
            contact_form.instance.idtrp_cnt = '-1'
            if contact_data['DELETE'] is True:
                print('delete me')
                contact_form.instance.deleted_cnt = True
                contact_form.instance.date_deleted_cnt = '2020-11-23'
                contact_form.instance.deleted_by_cnt = 'adeacon'
                messages.info(request, 'Contact deleted successfully')
            contact_form.save()
            formset = ContactFormSet(
                queryset=AppContactCnt.objects.filter(idcst_cnt=id).exclude(deleted_cnt=True).select_related('idctp_cnt'),
                prefix='contact')
            if contact_data['DELETE'] is not True:
                messages.info(request, 'Contacts updated successfully!')
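For comparison, here is a minimal hedged sketch of handling only the soft-delete side through deleted_forms, assuming the same ContactFormSet and model field names as above and validating the whole formset first (which is what deleted_forms relies on):
contact_formset = ContactFormSet(request.POST, prefix='contact')
if contact_formset.is_valid():                      # validate before reading deleted_forms
    for contact_form in contact_formset.deleted_forms:
        contact = contact_form.instance             # model instance bound to the form
        contact.deleted_cnt = True
        contact.date_deleted_cnt = '2020-11-23'
        contact.deleted_by_cnt = 'adeacon'
        contact.save()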
I have a requirement to create an automated password reset script. I created a custom field in order to try and track this and also hope I can access some of the standard fields. This script should find users with the following criteria:
The latest of any of the following 3 dates that are >= 90 days ago : Sign_Up, Forgot_Password, or custom:pwdCreateDate
I can't seem to find any boto3 Cognito client way of getting this information, except for forgot password, which shows up in admin_list_user_auth_events, and that response doesn't include the username. I suppose that since you provide the username to get the events, you can figure out the latest forgot-password date from the events and tie it back to the username.
Has anyone else implemented any boto3 automation to set the account to force password reset based on any of these fields?
Here is where I landed. Take it with the understanding that Cognito has some limitations that make truly flawless password rotation difficult. Also, make the script as efficient as you can, because in Lambda you will probably time out with more than about 350 users due to the 5 RPS limit on the admin API.
Prerequisites: set the Lambda function to a concurrency of 5 or you will exceed the 5 RPS limit; one mutable field in your Cognito user pool attributes to put a date in; and a custom Lambda zip file that includes pandas, saved to S3.
import os
import sys
# this adds the parent directory of bin so we can find the module
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parent_dir)
#This adds venv lib/python2.7/site-packages/ to the search path
mod_path = os.path.abspath(parent_dir+"/lib/python"+str(sys.version_info[0])+"."+str(sys.version_info[1])+"/site-packages/")
sys.path.append(mod_path)
import boto3
import datetime
import pandas as pd
import time
current_path = os.path.dirname(os.path.realpath(__file__))
# Use this one for the parent directory
ENV_ROOT = os.path.abspath(os.path.join(current_path, os.path.pardir))
# Use this one for the current directory
#ENV_ROOT = os.path.abspath(os.path.join(current_path))
sys.path.append(ENV_ROOT)
#if __name__ == "__main__":
def lambda_handler(event, context):
    user_pool_id = os.environ['USER_POOL_ID']
    idp_client = boto3.client('cognito-idp')
    users_list = []
    page_token = None
    dateToday = datetime.datetime.today().date()

    def update_user(user) :
        # record the rotation date in the custom attribute...
        idp_client.admin_update_user_attributes(
            UserPoolId = user_pool_id,
            Username = user,
            UserAttributes = [
                {
                    'Name': 'custom:pwdCreateDate',
                    'Value': str(dateToday)
                }
            ]
        )
        # ...and force the user into RESET_REQUIRED status
        # (admin_update_user_attributes does not accept a UserStatus argument)
        idp_client.admin_reset_user_password(
            UserPoolId = user_pool_id,
            Username = user
        )

    # collect every username in the pool, following pagination
    users = idp_client.list_users(
        UserPoolId = user_pool_id
    )
    for user in users['Users']: users_list.append(user['Username'])
    page_token = users.get('PaginationToken')
    while page_token :
        users = idp_client.list_users(
            UserPoolId = user_pool_id,
            PaginationToken = page_token
        )
        for user in users["Users"]: users_list.append(user["Username"])
        page_token = users.get('PaginationToken')

    # date each password was last set, from the custom attribute
    attrPwdDates = []
    for i in range(len(users_list)) :
        userAttributes = idp_client.admin_get_user(
            UserPoolId = user_pool_id,
            Username = users_list[i]
        )
        pwd_set_date = None
        for a in userAttributes['UserAttributes'] :
            if a['Name'] == 'custom:pwdCreateDate' :
                pwd_set_date = datetime.datetime.strptime(a['Value'], '%Y-%m-%d %H:%M:%S.%f').date()
        attrPwdDates.append(pwd_set_date)   # None keeps the lists aligned when the attribute is missing
        time.sleep(1.0)                     # stay under the 5 RPS admin API limit
    list_of_userattr_tuples = list(zip(users_list, attrPwdDates))
    df1 = pd.DataFrame(list_of_userattr_tuples, columns = ['Username','Password_Last_Set'])

    # date of each user's most recent successful ForgotPassword event
    authPwdDates = []
    for i in range(len(users_list)) :
        authEvents = idp_client.admin_list_user_auth_events(
            UserPoolId = user_pool_id,
            Username = users_list[i]
        )
        last_forgot = None
        for event in authEvents['AuthEvents'] :
            if event['EventType'] == 'ForgotPassword' and event['EventResponse'] == 'Pass' :
                last_forgot = event['CreationDate'].date()
                break
        authPwdDates.append(last_forgot)    # None keeps the lists aligned when there is no such event
        time.sleep(1.0)
    list_of_userauth_tuples = list(zip(users_list, authPwdDates))
    df2 = pd.DataFrame(list_of_userauth_tuples, columns = ['Username','Password_Last_Forgot'])

    # reset anyone whose newest of the two dates is 90+ days old
    df3 = df1.merge(df2, how='left', on = 'Username')
    df3[['Password_Last_Set','Password_Last_Forgot']] = df3[['Password_Last_Set','Password_Last_Forgot']].apply(pd.to_datetime)
    cols = ['Password_Last_Set','Password_Last_Forgot']
    df4 = df3.loc[df3[cols].max(axis=1) <= pd.Timestamp.now() - pd.Timedelta(90, unit='d'), 'Username']
    for username in df4 :                   # df4 is a Series of usernames, so iterate it directly
        update_user(username)
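To satisfy the first prerequisite (capping the function at a concurrency of 5), here is a minimal hedged sketch using boto3; the function name is a placeholder for whatever you called your rotation Lambda:
import boto3

lambda_client = boto3.client('lambda')
# reserve a concurrency of 5 for the rotation function so it cannot
# exceed the ~5 RPS Cognito admin API limit (function name is hypothetical)
lambda_client.put_function_concurrency(
    FunctionName='cognito-password-rotation',
    ReservedConcurrentExecutions=5
)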
I cannot get the above information using the GitHub API. Reading the documentation did not help much; I still don't fully understand how to work with dates. Here is an example of my code for getting open issues:
import requests
import json
from datetime import datetime
username = '\'
password = '\'
another_page = True
opened = 0
closed = 0
api_oldest = 'https://api.github.com/repos/grpc/grpc/issues?per_page=5&q=sort=created:>`date -v-14d "+%Y-%m-%d"`&order=asc'
api_issue = 'https://api.github.com/repos/grpc/grpc/issues?page=1&per_page=5000'
api_pulls = 'https://api.github.com/repos/grpc/grpc/pulls?page=1'
datetime.now()
while another_page:
    r = requests.get(api_issue, auth=(username, password))
    #json_response = json.loads(r.text)
    #results.append(json_response)
    if 'next' in r.links:
        api_issue = r.links['next']['url']
        if item['state'] == 'open':
            opened += 1
        else:
            closed += 1
    else:
        another_page=False
datetime.now()
print(opened)
There are a few issues with your code. For example, what does item represent? Your code can be modified as follows to iterate and get the number of open issues.
import requests
username = '/'
password = '/'
another_page = True
opened = 0
closed = 0
api_issue = "https://api.github.com/repos/grpc/grpc/issues?page=1&per_page=5000"
while another_page:
    r = requests.get(api_issue, auth=(username, password))
    json_response = r.json()
    #results.append(json_response)
    for item in json_response:
        if item['state'] == 'open':
            opened += 1
        else:
            closed += 1
    if 'next' in r.links:
        api_issue = r.links['next']['url']
    else:
        another_page=False
print(opened)
If you want issues that were created in the last 14 days, you could make the API request using the following URL.
api_oldest = "https://api.github.com/repos/grpc/grpc/issues?q=sort=created:>`date -d '14 days ago'`&order=asc"
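That URL embeds a shell-style date substitution, which Python will not expand, so as a hedged alternative you could build the date in Python and call the issue search endpoint, which supports a created: qualifier. A sketch reusing the username and password variables from above:
import requests
from datetime import datetime, timedelta

# date 14 days ago, formatted as the search API expects
since = (datetime.utcnow() - timedelta(days=14)).strftime('%Y-%m-%d')
search_url = (
    "https://api.github.com/search/issues"
    f"?q=repo:grpc/grpc+type:issue+state:open+created:>={since}"
    "&sort=created&order=asc"
)
r = requests.get(search_url, auth=(username, password))
print(r.json()['total_count'])  # number of matching issues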
Hi, I am trying to unload multiple tables from Redshift to a particular S3 bucket and I am getting the error below:
psycopg2.InternalError: Specified unload destination on S3 is not empty. Consider using a different bucket / prefix, manually removing the target files in S3, or using the ALLOWOVERWRITE option.
If I add the ALLOWOVERWRITE option to the unload function, it overwrites the previous tables and only the last table ends up unloaded in S3.
This is the code I have given:
import psycopg2

def unload_data(r_conn, aws_iam_role, datastoring_path, region, table_name):
    unload = '''unload ('select * from {}')
                to '{}'
                credentials 'aws_iam_role={}'
                manifest
                gzip
                delimiter ',' addquotes escape parallel off '''.format(table_name, datastoring_path, aws_iam_role)
    print("Exporting table to datastoring_path")
    cur = r_conn.cursor()
    cur.execute(unload)
    r_conn.commit()

def main():
    host_rs = 'dataingestion.*********.us******2.redshift.amazonaws.com'
    port_rs = '5439'
    database_rs = '******'
    user_rs = '******'
    password_rs = '********'
    rs_tables = [ 'Employee', 'Employe_details' ]
    iam_role = 'arn:aws:iam::************:role/RedshiftCopyUnload'
    s3_datastoring_path = 's3://mysamplebuck/'
    s3_region = 'us_*****_2'
    print("Exporting from source")
    src_conn = psycopg2.connect(host = host_rs,
                                port = port_rs,
                                database = database_rs,
                                user = user_rs,
                                password = password_rs)
    print("Connected to RS")
    for i, tabe in enumerate(rs_tables):
        if tabe[0] == tabe[-1]:
            print("No files to read!")
        unload_data(src_conn, aws_iam_role = iam_role, datastoring_path = s3_datastoring_path, region = s3_region, table_name = rs_tables[i])
        print(rs_tables[i])

if __name__=="__main__":
    main()
It is complaining that you are saving the data to the same destination.
This would be like copying all the files on your computer to the same directory -- there will be files overwritten.
You should change your datastoring_path to be different for each table, such as:
.format(table_name, datastoring_path + '/' + table_name, aws_iam_role)
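Concretely, a hedged sketch of the changed unload_data (same function as in your question; only the destination changes so each table gets its own prefix):
def unload_data(r_conn, aws_iam_role, datastoring_path, region, table_name):
    # each table unloads under its own prefix, e.g. s3://mysamplebuck//Employee
    unload = '''unload ('select * from {}')
                to '{}'
                credentials 'aws_iam_role={}'
                manifest
                gzip
                delimiter ',' addquotes escape parallel off '''.format(
        table_name, datastoring_path + '/' + table_name, aws_iam_role)
    cur = r_conn.cursor()
    cur.execute(unload)
    r_conn.commit()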