I am trying to use Python code to connect from GCP Cloud Functions to a PostgreSQL instance of Cloud SQL. I am new to Python, so I am struggling with an error and need help fixing it.
My code is as follows:
# Don't add the "/.s.PGSQL.5432" suffix, because it will already be added
# back automatically by the library...
import psycopg2
import sqlalchemy
from sqlalchemy import create_engine

engine = sqlalchemy.create_engine('postgresql+psycopg2://postgres:a1b2c3d4e190@X.X.X.X/postgres_dbase')
print("connected")
The function deploys fine, but the URL fails with the error below:
Error: could not handle the request
The log shows this error:
Details: 'Request' object has no attribute '_instantiate_plugins'
It works fine when I execute the code directly:
$ python3 open.py
connected
I am using Python 3.7.
UPDATE
It looks like I have to set DATABASE_URL, but I am not sure how to set it in Cloud Functions. I added the runtime environment variable below in Cloud Functions, but no luck:
DATABASE_URL="sqlite://"
https://github.com/sqlalchemy/sqlalchemy/issues/5330
I am not sure whether you are trying to connect from a public IP or from a private IP. However, here is an example of how to do this using a public IP. Your code should look like this:
import os
import sqlalchemy

# Extra engine arguments (pool sizing and so on); adjust to your needs.
db_config = {
    "pool_size": 5,
    "max_overflow": 2,
    "pool_timeout": 30,    # seconds
    "pool_recycle": 1800,  # seconds
}

db_user = os.environ["DB_USER"]
db_pass = os.environ["DB_PASS"]
db_name = os.environ["DB_NAME"]
db_socket_dir = os.environ.get("DB_SOCKET_DIR", "/cloudsql")
cloud_sql_connection_name = os.environ["CLOUD_SQL_CONNECTION_NAME"]

pool = sqlalchemy.create_engine(
    # Equivalent URL:
    # postgresql+pg8000://<db_user>:<db_pass>@/<db_name>
    #   ?unix_sock=<socket_path>/<cloud_sql_instance_name>/.s.PGSQL.5432
    sqlalchemy.engine.url.URL(
        drivername="postgresql+pg8000",
        username=db_user,  # e.g. "my-database-user"
        password=db_pass,  # e.g. "my-database-password"
        database=db_name,  # e.g. "my-database-name"
        query={
            "unix_sock": "{}/{}/.s.PGSQL.5432".format(
                db_socket_dir,              # e.g. "/cloudsql"
                cloud_sql_connection_name)  # i.e. "<PROJECT-NAME>:<INSTANCE-REGION>:<INSTANCE-NAME>"
        }
    ),
    **db_config
)
On the other hand, if you are using a private IP, you should have something like this in your code:
import os
import sqlalchemy

db_user = os.environ["DB_USER"]
db_pass = os.environ["DB_PASS"]
db_name = os.environ["DB_NAME"]
db_host = os.environ["DB_HOST"]

# Extract host and port from db_host
host_args = db_host.split(":")
db_hostname, db_port = host_args[0], int(host_args[1])

pool = sqlalchemy.create_engine(
    # Equivalent URL:
    # postgresql+pg8000://<db_user>:<db_pass>@<db_host>:<db_port>/<db_name>
    sqlalchemy.engine.url.URL(
        drivername="postgresql+pg8000",
        username=db_user,   # e.g. "my-database-user"
        password=db_pass,   # e.g. "my-database-password"
        host=db_hostname,   # e.g. "127.0.0.1"
        port=db_port,       # e.g. 5432
        database=db_name    # e.g. "my-database-name"
    ),
    **db_config  # same extra engine arguments as in the public IP example
)
Make sure you have configured your Cloud Function accordingly, depending on whether you are using a public IP or a private IP.
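In either case, here is a quick usage sketch once the engine exists (a minimal example; the table name my_table is a placeholder, and the pool object is the one defined above):

# Borrow a connection from the pool and run a test query.
with pool.connect() as conn:
    rows = conn.execute("SELECT * FROM my_table LIMIT 10").fetchall()
    for row in rows:
        print(row)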
This has worked for me when connecting Google App Engine to a PostgreSQL instance on Google Cloud Platform.
import psycopg2

try:
    conn = psycopg2.connect(dbname='your_data_base_name',
                            user='your_data_base_user',
                            password='your_data_base_user_password',
                            host='/cloudsql/project_name:region:database_instance_name')
except psycopg2.Error as e:
    print("Error: Could not make connection to the Postgres database")
    print(e)

cur = conn.cursor()
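From there, a short follow-up sketch to confirm the connection works (the table name your_table is a placeholder):

# Run a simple query, print the results, then clean up.
cur.execute('SELECT * FROM your_table LIMIT 5')
for row in cur.fetchall():
    print(row)
cur.close()
conn.close()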
Related
I have not been able to connect to PostgreSQL from Google Cloud Functions. This is the code; I'm following these instructions: https://cloud.google.com/sql/docs/postgres/connect-functions. The actual output of my program is 1expected bytes, str found, since it manages to append the first '1' to the output before raising the exception.
I haven't even been able to get a more explicit error than this. Any ideas would be very much welcome :-).
import os
import sqlalchemy
import pg8000

def hello_world(request):
    output = ''
    try:
        # Remember - storing secrets in plaintext is potentially unsafe. Consider using
        # something like https://cloud.google.com/secret-manager/docs/overview to help keep
        # secrets secret.
        db_user = $DB_USER  # actual values redacted in this post
        db_pass = $PASS
        db_name = $DB_NAME
        db_socket_dir = os.environ.get("DB_SOCKET_DIR", "/cloudsql")
        cloud_sql_connection_name = $CLOUD_SQL_CONNECTION_NAME

        pool = sqlalchemy.create_engine(
            # Equivalent URL:
            # postgres+pg8000://<db_user>:<db_pass>@/<db_name>
            #   ?unix_sock=<socket_path>/<cloud_sql_instance_name>/.s.PGSQL.5432
            sqlalchemy.engine.url.URL(
                drivername="postgres+pg8000",
                username=db_user,  # e.g. "my-database-user"
                password=db_pass,  # e.g. "my-database-password"
                database=db_name,  # e.g. "my-database-name"
                query={
                    "unix_sock": "%s/%s/.s.PGSQL.5432" % (
                        db_socket_dir,              # e.g. "/cloudsql"
                        cloud_sql_connection_name)  # i.e. "<PROJECT-NAME>:<INSTANCE-REGION>:<INSTANCE-NAME>"
                }
            ),
            # ... Specify additional properties here.
        )
        output += '1'
        pool.connect()
        output += '2'
        return output
    except Exception as e:
        output += str(e)
        return output
I think you might be running into this compatibility issue between SQLAlchemy and the latest version of pg8000: https://github.com/tlocke/pg8000/issues/53
OK, I got it. The parameters need to be bytes, so if db_user = "user", for example, I need to change it to db_user = b"user".
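Concretely, a minimal sketch of that fix applied to the snippet above (values are placeholders):

# With the affected pg8000 version, credentials must be bytes, not str.
db_user = b"my-database-user"
db_pass = b"my-database-password"
db_name = b"my-database-name"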
import pandas
import pygrametl
import psycopg2
from pygrametl.tables import SlowlyChangingDimension, CachedDimension, BulkDimension
from pygrametl.datasources import CSVSource

## Connection to Postgres
connection = psycopg2.connect(host="localhost", database="postgres",
                              user="postgres", password="tekihcan")
connect = pygrametl.ConnectionWrapper(connection)

def pgcopybulkloader(name, atts, fieldsep, rowsep, nullval, filehandle):
    # Here we use driver-specific code to get fast bulk loading.
    # You can change this method if you use another driver or you can
    # use the FactTable or BatchFactTable classes (which don't require
    # use of driver-specific code) instead of the BulkFactTable class.
    global connection
    curs = connect.cursor()
    try:
        curs.copy_from(file=filehandle, table=name, sep=fieldsep,
                       columns=atts, null='null')
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error %s" % error)
date_dim = BulkDimension(name='date_dim',key='d_date_sk',attributes=[
'd_date_id (B)'
,'d_date'
,'d_month_seq'
,'d_week_seq'
,'d_quarter_seq'
,'d_year'
,'d_dow'
,'d_moy'
,'d_dom'
,'d_qoy'
,'d_fy_year'
,'d_fy_quarter_seq'
,'d_fy_week_seq'
,'d_day_name'
,'d_quarter_name'
,'d_holiday'
,'d_weekend'
,'d_following_holiday'
,'d_first_dom'
,'d_last_dom'
,'d_same_day_ly'
,'d_same_day_lq'
,'d_current_day'
,'d_current_week'
,'d_current_month'
,'d_current_quarter'
,'d_current_year'
], lookupatts=['d_date_id (B)'],
   bulkloader=pgcopybulkloader)

date_dim_source = CSVSource(open('C:/Users/HP/Documents/v2.13.0rc1/data/date_dim.csv',
                                 'r', 16384), delimiter='|')

def main():
    for row in date_dim_source:
        date_dim.insert(row)
The code is failing with an error. As per my understanding, the error is caused by the target table being empty. The CSV source doesn't have a header row either; could this be impacting the code? (See the sketch after the link below.)
Here is the link that was used to develop the code: https://chrthomsen.github.io/pygrametl/
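On the header question: pygrametl's CSVSource is built on csv.DictReader, which takes its keys from the first row unless you pass explicit field names, so a headerless file would silently map the first data row to column names. A minimal sketch, assuming the column order in the file matches the dimension's attributes (the shortened name list is illustrative):

# With no header row in the CSV, pass fieldnames explicitly so each value
# maps to the right attribute; the names must match the dimension's.
date_dim_source = CSVSource(open('date_dim.csv', 'r', 16384), delimiter='|',
                            fieldnames=['d_date_sk', 'd_date_id (B)', 'd_date'])  # ...and the remaining columns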
Could someone help me with the error below while connecting to Teradata from my Python environment?
I'm using the ODBC driver method, and I've tried all of the existing methods below to connect, but no luck.
Note: if you are using Windows, you can directly use these methods; however, the problem comes up when you are on macOS (though not for everyone).
Using the teradata module and SQLAlchemy:
import teradata
import pyodbc

server = '111.00.00.00'
username = 'user'
password = 'pwd'

udaExec = teradata.UdaExec(appName="test", version="1.0",
                           logConsole=True)
ndw_con = udaExec.connect(method='odbc', authentication="LDAP",
                          system=server, username=username, password=password)

# SQLAlchemy with the teradata dialect
from sqlalchemy import create_engine

user = 'user'
pwd = 'pwd'
host = '111.00.00.00'

td_engine = create_engine('teradata://' + user + ':' + pwd + '@' + host + ':22/')
result = td_engine.execute('select top 100 * from temp.sampledata')
Using pyodbc, the code below gave me a new error:
('01000', "[01000] [unixODBC][Driver Manager]Can't open lib 'Teradata' : file not found (0) (SQLDriverConnect)")
import pyodbc

td_conn = pyodbc.connect('DRIVER={Teradata};DBCName=' + server + ';UID=' + username + ';PWD=' + password,
                         autocommit=True)
cursor = td_conn.cursor()
Regardless, I was unable to make a connection to Teradata. Could someone let me know what's going on here and how to fix this issue once and for all?
Thanks!
Found the answer using the pyodbc module. I replaced the Driver={Teradata} parameter with the full path to where the driver is located; see below for the full connection string. Please note that this can only be used on macOS.
td_conn = pyodbc.connect('DRIVER={/Library/Application Support/teradata/client/16.20/lib/tdataodbc_sbu.dylib};DBCName=' + server + ';UID=' + username + ';PWD=' + password, autocommit=True, authentication="LDAP")
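Once connected, a quick sanity check might look like this (a hedged sketch; Teradata accepts a SELECT of an expression without a FROM clause):

# Verify the connection by selecting the current date.
cursor = td_conn.cursor()
cursor.execute('SELECT CURRENT_DATE')
print(cursor.fetchone())
cursor.close()
td_conn.close()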
Does anyone know how to make a connection from Python to an AS/400 (IBM i) system and call AS/400 programs with parameters?
For example, how would you create a library on the AS/400 through Python? I want to call CRTLIB LIB(TEST) from a Python script.
I am able to connect to the DB2 database through the pyodbc package.
Here is my code to connect to the DB2 database:
import pyodbc

connection = pyodbc.connect(
    driver='{iSeries Access ODBC Driver}',
    system='ip/hostname',
    uid='username',
    pwd='password')
c1 = connection.cursor()
c1.execute('select * from libname.filename')
for row in c1:
    print(row)
If your IBM i is set up to allow it, you can call the QCMDEXC stored procedure using CALL in your SQL. For example,
c1.execute("call qcmdexc('crtlib lib(test)')")
The QCMDEXC stored procedure lives in QSYS2 (the actual program object is QSYS2/QCMDEXC1) and does much the same as the familiar program of the same name that lives in QSYS, but the stored procedure is specifically meant to be called via SQL.
Of course, for this example to work, your connection profile has to have the proper authority to create libraries.
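If you'd rather not splice the command into the SQL string, a hedged variant using a pyodbc parameter marker should also work (note this is an assumption on my part; on some IBM i releases QCMDEXC is instead invoked with an explicit command-length argument):

# Pass the CL command as a bound parameter rather than inlining it.
command = "CRTLIB LIB(TEST)"
c1.execute("call qsys2.qcmdexc(?)", command)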
It's also possible that your IBM i isn't set up to allow this. I don't know exactly what goes into enabling this functionality, but where I work, we have one partition where the example shown above completes normally, and another partition where I get this instead:
pyodbc.Error: ('HY000', '[HY000] [IBM][System i Access ODBC Driver][DB2 for i5/OS]SQL0901 - SQL system error. (-901) (SQLExecDirectW)')
This gist shows how to connect to an AS/400 via pyodbc:
https://gist.github.com/BietteMaxime/6cfd5b2dc2624c094575
A few notes: in this example, SYSTEM is the DSN you set up for the AS/400 in the with pyodbc.connect statement. You could also switch this to SERVER and PORT with these modifications:
import pyodbc


class CommitMode:
    NONE = 0  # Commit immediate (*NONE) --> QSQCLIPKGN
    CS = 1    # Read committed (*CS) --> QSQCLIPKGS
    CHG = 2   # Read uncommitted (*CHG) --> QSQCLIPKGC
    ALL = 3   # Repeatable read (*ALL) --> QSQCLIPKGA
    RR = 4    # Serializable (*RR) --> QSQCLIPKGL


class ConnectionType:
    READ_WRITE = 0  # Read/Write (all SQL statements allowed)
    READ_CALL = 1   # Read/Call (SELECT and CALL statements allowed)
    READ_ONLY = 2   # Read-only (SELECT statements only)


def connstr(server, port, commit_mode=None, connection_type=None):
    _connstr = 'DRIVER=iSeries Access ODBC Driver;SERVER={server};PORT={port};SIGNON=4;CCSID=1208;TRANSLATE=1;'.format(
        server=server,
        port=port,
    )
    if commit_mode is not None:
        _connstr = _connstr + 'CommitMode=' + str(commit_mode) + ';'
    if connection_type is not None:
        _connstr = _connstr + 'ConnectionType=' + str(connection_type) + ';'
    return _connstr


def main():
    with pyodbc.connect(connstr('myas400.server.com', '8471', CommitMode.CHG, ConnectionType.READ_ONLY)) as db:
        cursor = db.cursor()
        cursor.execute(
            """
            SELECT * FROM IASP.LIB.FILE
            """
        )
        for row in cursor:
            print(' '.join(map(str, row)))


if __name__ == '__main__':
    main()
I cleaned up some PEP-8 as well. Good luck!
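For comparison, a minimal DSN-based sketch (assuming a DSN named MYAS400 is already configured in your ODBC settings; the credentials are placeholders):

import pyodbc

# Connect through a preconfigured ODBC DSN instead of SERVER/PORT.
with pyodbc.connect('DSN=MYAS400;UID=myuser;PWD=mypassword;') as db:
    cursor = db.cursor()
    cursor.execute('SELECT * FROM LIB.FILE')
    for row in cursor:
        print(row)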
I am trying to change the directory of the CouchDB database. I am using a Python script to import a CSV file into CouchDB. The script runs fine. Here it is, just in case:
from couchdbkit import Server, Database
from couchdbkit.loaders import FileSystemDocsLoader
from csv import DictReader
import sys, subprocess, math, os

def parseDoc(doc):
    for k, v in doc.items():
        if isinstance(v, str):
            # print k, v, v.isdigit()
            # See if this string is really an int or a float
            if v.isdigit() == True:  # int
                doc[k] = int(v)
            else:  # try a float
                try:
                    if math.isnan(float(v)) == False:
                        doc[k] = float(v)
                except:
                    pass
    return doc

def upload(db, docs):
    db.bulk_save(docs)
    del docs
    return list()

def uploadFile(fname, dbname):
    # Connect to the db
    theServer = Server()
    db = theServer.get_or_create_db(dbname)

    # Loop on file for upload
    reader = DictReader(open(fname, 'rU'), dialect='excel')

    docs = list()
    checkpoint = 100
    i = 0
    for doc in reader:
        newdoc = parseDoc(doc)
        docs.append(newdoc)
        if len(docs) % checkpoint == 0:
            docs = upload(db, docs)
            i += 1
            print 'Number : %d' % i

    # Don't forget the last batch
    docs = upload(db, docs)

if __name__ == '__main__':
    x = '/media/volume1/Crimes_-_2001_to_present.csv'
    filename = x
    dbname = 'test'
    uploadFile(filename, dbname)
I saw plenty of posts on how to change the directory for the database. If I leave /etc/couchdb/local.ini as it is (original after installation), the script appends data to the default directory /var/lib/couchdb/1.0.1/. When I modify local.ini to store the database on another disk:
database_dir = /media/volume1
view_index_dir = /media/volume1
and then restart the CouchDB service, I get this error:
restkit.errors.RequestError: socket.error: [Errno 111] Connection refused
I have checked the open sockets (CouchDB uses port 5984 by default) and the port is not open. But I get no errors when I start the CouchDB service.
Any ideas how to fix it?
I think the error may be because you have changed the directory location in local.ini, but when CouchDB tries to open the existing database, it cannot find it there.
So move the database_name.couch file to the new location that you set in local.ini and then try to make a connection. I think this should work.
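Before retrying the upload, a small hedged check that the server is actually listening may help (CouchDB's root endpoint returns a welcome document; host and port assume a default local install, and the snippet is Python 2 to match the script above):

import json
import urllib2

# Hit CouchDB's root endpoint; a running server answers with welcome JSON.
try:
    resp = urllib2.urlopen('http://127.0.0.1:5984/')
    print json.load(resp)  # e.g. {"couchdb": "Welcome", "version": "1.0.1"}
except Exception as e:
    # Connection refused here usually means CouchDB failed to start, often
    # because the couchdb user lacks write permission on the new database_dir.
    print 'CouchDB not reachable:', e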