Issues with multiprocessing and the getpass module - python-3.x

I am trying to write multiprocessing code that uses the getpass module:
import time
from multiprocessing import Pool
from multiprocessing import freeze_support
import getpass
import jaydebeapi
import pandas as pd
import numpy as np

pw = getpass.getpass(prompt="Password", stream=False)

# establishing a connection to the ODS database
ODS = jaydebeapi.connect(
    'com.ibm.db2.jcc.DB2Driver',
    'jdbc:db2://he3qlxvtdbs351.fhlmc.com:50001/DB2QLTY',
    ['f408195', pw],
    'C:/JDBC/db2jcc.jar')

# allows SQL statements against the ODS database
ODS = ODS.cursor()

# creating the password needed to establish the PML database connection
pw_2 = getpass.getpass(prompt="Password", stream=False)

# establishing a connection to the PML database
PML = jaydebeapi.connect(
    'com.ibm.db2.jcc.DB2Driver',
    'jdbc:db2://he3qlxvtdbs957.fhlmc.com:50001/PMLFDB2',
    ['f408195', pw_2],
    'C:/JDBC/db2jcc.jar')

# allows SQL statements against the PML database
PML = PML.cursor()

def test(first_evnt, last_evnt):
    PML_loan_Query = "select b.id_lpa_alt_loan from udbadm.pml_lst_cmpltd_trans_mtch a join udbadm.lpa_altv_loan_idtn b on a.id_evnt = b.id_evnt where b.cd_lpa_alt_loan_idtn = 'HewlettPackardGeneratedTransaction' and a.id_evnt BETWEEN ? AND ?"
    PML.execute(PML_loan_Query, (first_evnt, last_evnt))
    loan_records = PML.fetchall()
    df = pd.DataFrame()
    for x in loan_records:
        # Populating the ODS table
        #borr_query = "SELECT nbr_aus, CAST(NULLIF(NULLIF(cd_idx, -9999), 0.000000) AS VARCHAR(100)) AS cd_idx, CAST(rate_curr_int AS INT) AS rate_curr_int, CAST(NULLIF(rate_gr_mrtg_mrgn,0) AS INT) AS rate_gr_mrtg_mrgn, CAST(rate_loln_max_cap AS INT) AS rate_loln_max_cap, CAST(NULLIF(rate_perdc_cap,0) AS INT) AS rate_perdc_cap FROM DB2MANT.I_LP_TRANS WHERE nbr_trans_aus BETWEEN ? AND ?"
        borr_query = 'SELECT nbr_aus, CAST(NULLIF(NULLIF(cd_idx, -9999), 0.000000) AS VARCHAR(10)) AS cd_idx, CAST(rate_curr_int AS VARCHAR(10)) AS rate_curr_int, CAST(NULLIF(rate_gr_mrtg_mrgn,0) AS VARCHAR(10)) AS rate_gr_mrtg_mrgn, CAST(rate_loln_max_cap AS VARCHAR(10)) AS rate_loln_max_cap, CAST(NULLIF(rate_perdc_cap,0) AS VARCHAR(10)) AS rate_perdc_cap FROM DB2MANT.I_LP_TRANS WHERE nbr_trans_aus IN (?)'
        #borr_query = "SELECT DISTINCT nbr_aus FROM DB2MANT.I_LP_TRANS WHERE nbr_trans_aus BETWEEN ? AND ?"
        ODS.execute(borr_query, x)
        #ODS.execute(ODS_list)
        ODS_records = ODS.fetchall()
        ODS_records = df.append(pd.DataFrame(ODS_records, columns=['nbr_aus', 'cd_idx', 'rate_curr_int', 'rate_gr_mrtg_mrgn', 'rate_loln_max_cap', 'rate_perdc_cap']))
    return ODS_records

if __name__ == '__main__':
    freeze_support()
    first_evnt = 155643917
    last_evnt = 155684481

    p = Pool()
    result = p.map(test, [first_evnt, last_evnt])
    print(result)
    p.close()
    p.join()
I saved this script into a .py file and ran it from the command prompt. It asked for my ODS database password, then my PML database password, and then it seems to keep running the getpass prompt over and over again.
(Screenshot: the terminal repeatedly showing the "Password" prompt after the two expected database prompts.)
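The repeated prompts are consistent with how multiprocessing starts workers on Windows: each child process re-imports the script, so everything at module level, including the two getpass calls and both database connections, runs again in every worker. A minimal sketch of the usual fix, keeping all interactive and connection setup under the __main__ guard (the starmap call is an assumption on my part, since test() takes two arguments while map() passes only one):

import getpass
from multiprocessing import Pool, freeze_support

def test(first_evnt, last_evnt):
    # Workers would open their own DB connections here; connection
    # objects generally cannot be pickled and shared across processes.
    ...

if __name__ == '__main__':
    freeze_support()
    # These now run exactly once, in the parent process only.
    pw = getpass.getpass(prompt="Password", stream=False)
    pw_2 = getpass.getpass(prompt="Password", stream=False)
    with Pool() as p:
        # starmap unpacks the tuple into test(first_evnt, last_evnt)
        result = p.starmap(test, [(155643917, 155684481)])
    print(result)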

Related

How to load data from a connection string with the vaex package?

If I have a table on my server and I produce a connection string for it, how can I load it into a Vaex dataframe?
Here is what I am doing but with Pandas:
from sqlalchemy import types, create_engine, text
import pandas as pd
import pymysql

def connect_to_data(driver='mysql+pymysql://', conn_string=''):
    try:
        conn = create_engine(driver + conn_string)
        print("MySQL Connection Successful!")
    except Exception as err:
        print("MySQL Connection Failed!")
        print(err)
    return conn
# Connect to the db:
conn_string = 'xxxxxxxx'
conn = connect_to_data(conn_string=conn_string)
# Get all requests from the db:
query = '''SELECT * FROM table_name'''
result = conn.execute(text(query))
# Desired dataframe:
df = pd.read_sql_query(query, conn)
How can I do the same with Vaex (because of its high performance)?
For now at least, you can't do it directly, but vaex can easily read a pandas dataframe, so you can:
# Following your example..
import vaex
pandas_df = pd.read_sql_query(query, conn)
df = vaex.from_pandas(pandas_df)
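If the table is too large to pull into pandas in one go, a possible workaround (a sketch, not part of the original answer) is to read the query in chunks and let vaex concatenate the pieces:

import pandas as pd
import vaex

# Stream the query in chunks so only one chunk lives in pandas at a time.
chunks = [vaex.from_pandas(chunk)
          for chunk in pd.read_sql_query(query, conn, chunksize=100_000)]
df = vaex.concat(chunks)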

How to insert values into a PostgreSQL table with a Python function and psycopg2?

I have a PostgreSQL function for inserting values that works fine in psql:
CREATE FUNCTION new_msg(p1 type, p2 type)
RETURNS type AS $$
BEGIN
    -- some logic
    INSERT INTO table(col1, col2) VALUES (p1, p2);
    RETURN value;
END;
$$ LANGUAGE language_name;
And a Python function like ...
import psycopg2
from config import config

def new_msg(ref_p1, ref_p2):
    # configuration
    params = config()
    conn = psycopg2.connect(**params)
    cur = conn.cursor()
    cur.execute("select * from new_msg(%s,%s);", (ref_p1, ref_p2,))
But when the function is called in Python
new_msg(some_p1, some_p2)
the values are not inserted into the corresponding table and no error is generated. I also tried the callproc method from psycopg2, but it is not working either. Any suggestions? Thanks.
Welcome to StackOverflow!
You need to commit your changes. You can try adding conn.commit() after each write to the DB (commit lives on the connection, not the cursor), or you could use a context manager to handle it all.
Create a file called mydb.py
import psycopg2
import psycopg2.pool
from contextlib import contextmanager

# ThreadedConnectionPool requires minconn and maxconn; the values here
# are placeholders, like the connection details.
dbpool = psycopg2.pool.ThreadedConnectionPool(minconn=1,
                                              maxconn=10,
                                              host=<<YourHost>>,
                                              port=<<YourPort>>,
                                              dbname=<<YourDB>>,
                                              user=<<YourUser>>,
                                              password=<<YourPassword>>,
                                              )

@contextmanager
def db_cursor():
    conn = dbpool.getconn()
    try:
        with conn.cursor() as cur:
            yield cur
        conn.commit()
    except:
        conn.rollback()
        raise
    finally:
        dbpool.putconn(conn)
Then your code can use:
import mydb

def new_msg(ref_p1, ref_p2):
    with mydb.db_cursor() as cur:
        cur.execute("""
            select * from new_msg(%(ref_p1)s, %(ref_p2)s)
            """, {
                'ref_p1': ref_p1,
                'ref_p2': ref_p2,
            }
        )
    return
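As an aside, psycopg2 connections also work as context managers themselves: entering the connection block starts a transaction and leaving it commits (or rolls back on an exception). That covers the simple single-connection case without a pool, e.g.:

import psycopg2

conn = psycopg2.connect(**params)
with conn:
    with conn.cursor() as cur:
        cur.execute("select * from new_msg(%s, %s);", (ref_p1, ref_p2))
# the with-block committed the transaction; the connection stays open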

How to modify the PostgreSQL psycopg to accept variables instead of values

I am creating a dictionary attacking tool for PostgreSQL. The tool is inspired by the work of m8r0wn - the enumdb tool. Mike's tool is aimed at MySQL and MSSQL; I aim to use the same approach he used but modify the actions and the output file. The script should:
1) Read a CSV file that contains targets and ports, one per line (e.g. 127.0.0.1,3380).
2) When provided a list of usernames and/or passwords, cycle through each targeted host looking for valid credentials. By default, it will use newly discovered credentials to search for sensitive information in the host's databases via keyword searches on the table or column names.
3) Extract this information and report it to a JSON, .csv or .xlsx output file.
I have semi-functional code, but I suspect the PostgreSQL connection function is not working due to the logic behind passing parameters. I am also interested in suggestions on how best to present the tool's results as a JSON file.
I understand that in Python, we have several modules available to connect and work with PostgreSQL which include:
Psycopg2
pg8000
py-postgresql
PyGreSQL
ocpgdb
bpgsql
SQLAlchemy
see also https://www.a2hosting.co.za/kb/developer-corner/postgresql/connecting-to-postgresql-using-python
The connection methods I have tried include:
# psycopg2
import psycopg2
from psycopg2 import Error
conn = psycopg2.connect(host=host, dbname=db_name, user=_user, password=_pass, port=port)

# PyGreSQL classic interface
import pg
conn = pg.DB(host=args.hostname, user=_user, passwd=_pass)

# pgdb (installed with: sudo pip install pgdb)
import pgdb
conn = pgdb.connect(host=args.hostname, user=_user, passwd=_pass)
I am not sure how to pass the different _user and _pass guesses into psycopg2, for instance, without breaking the code.
I have imported the following libraries
import re
import psycopg2
from psycopg2 import Error
import pgdb
#import MySQLdb
import pymssql
import argparse
from time import sleep
from sys import exit, argv
from getpass import getpass
from os import path, remove
from openpyxl import Workbook
from threading import Thread, activeCount
The PgSQL block is as follows:
##########################################
# PgSQL DB Class
##########################################
class pgsql():
    def connect(self, host, port, user, passwd, verbose):
        try:
            con = pgdb.connect(host=host, port=port, user=user, password=passwd, connect_timeout=3)
            con.query_timeout = 15
            print_success("[*] Connection established {}:{}#{}".format(user, passwd, host))
            return con
        except Exception as e:
            if verbose:
                print_failure("[!] Login failed {}:{}#{}\t({})".format(user, passwd, host, e))
            else:
                print_failure("[!] Login failed {}:{}#{}".format(user, passwd, host))
            return False

    def db_query(self, con, cmd):
        try:
            cur = con.cursor()
            cur.execute(cmd)
            data = cur.fetchall()
            cur.close()
        except:
            data = ''
        return data

    def get_databases(self, con):
        databases = []
        for x in self.db_query(con, 'SHOW DATABASES;'):
            databases.append(x[0])
        return databases

    def get_tables(self, con, database):
        tables = []
        self.db_query(con, "USE {}".format(database))
        for x in self.db_query(con, 'SHOW TABLES;'):
            tables.append(x[0])
        return tables

    def get_columns(self, con, database, table):
        # database var not used but kept to support mssql
        columns = []
        for x in self.db_query(con, 'SHOW COLUMNS FROM {}'.format(table)):
            columns.append(x[0])
        return columns

    def get_data(self, con, database, table):
        # database var not used but kept to support mssql
        return self.db_query(con, 'SELECT * FROM {} LIMIT {}'.format(table, SELECT_LIMIT))
The MSSQL block is as follows:
# MSSQL DB Class
class mssql():
    def connect(self, host, port, user, passwd, verbose):
        try:
            con = pymssql.connect(server=host, port=port, user=user, password=passwd, login_timeout=3, timeout=15)
            print_success("[*] Connection established {}:{}#{}".format(user, passwd, host))
            return con
        except Exception as e:
            if verbose:
                print_failure("[!] Login failed {}:{}#{}\t({})".format(user, passwd, host, e))
            else:
                print_failure("[!] Login failed {}:{}#{}".format(user, passwd, host))
            return False

    def db_query(self, con, cmd):
        try:
            cur = con.cursor()
            cur.execute(cmd)
            data = cur.fetchall()
            cur.close()
        except:
            data = ''
        return data

    def get_databases(self, con):
        databases = []
        for x in self.db_query(con, 'SELECT NAME FROM sys.Databases;'):
            databases.append(x[0])
        return databases

    def get_tables(self, con, database):
        tables = []
        for x in self.db_query(con, 'SELECT NAME FROM {}.sys.tables;'.format(database)):
            tables.append(x[0])
        return tables

    def get_columns(self, con, database, table):
        columns = []
        for x in self.db_query(con, 'USE {};SELECT column_name FROM information_schema.columns WHERE table_name = \'{}\';'.format(database, table)):
            columns.append(x[0])
        return columns

    def get_data(self, con, database, table):
        return self.db_query(con, 'SELECT TOP({}) * FROM {}.dbo.{};'.format(SELECT_LIMIT, database, table))
The main function block:
def main(args):
    try:
        for t in args.target:
            x = Thread(target=enum_db().db_main, args=(args, t,))
            x.daemon = True
            x.start()
            # Do not exceed max threads
            while activeCount() > args.max_threads:
                sleep(0.001)
        # Exit all threads before closing
        while activeCount() > 1:
            sleep(0.001)
    except KeyboardInterrupt:
        print("\n[!] Key Event Detected...\n\n")
        exit(0)

if __name__ == '__main__':
    version = '1.0.7'
    try:
        args = argparse.ArgumentParser(description=("""
{0} (v{1})
--------------------------------------------------
Brute force Juggernaut is a PgSQL brute forcing tool.**""").format(argv[0], version), formatter_class=argparse.RawTextHelpFormatter, usage=argparse.SUPPRESS)
        user = args.add_mutually_exclusive_group(required=True)
        user.add_argument('-u', dest='users', type=str, action='append', help='Single username')
        user.add_argument('-U', dest='users', default=False, type=lambda x: file_exists(args, x), help='Users.txt file')
        passwd = args.add_mutually_exclusive_group()
        passwd.add_argument('-p', dest='passwords', action='append', default=[], help='Single password')
        passwd.add_argument('-P', dest='passwords', default=False, type=lambda x: file_exists(args, x), help='Password.txt file')
        args.add_argument('-threads', dest='max_threads', type=int, default=3, help='Max threads (Default: 3)')
        args.add_argument('-port', dest='port', type=int, default=0, help='Specify non-standard port')
        args.add_argument('-r', '-report', dest='report', type=str, default=False, help='Output Report: csv, xlsx (Default: None)')
        args.add_argument('-t', dest='dbtype', type=str, required=True, help='Database types currently supported: mssql, pgsql')
        args.add_argument('-c', '-columns', dest="column_search", action='store_true', help="Search for key words in column names (Default: table names)")
        args.add_argument('-v', dest="verbose", action='store_true', help="Show failed login notices & keyword matches with Empty data sets")
        args.add_argument('-brute', dest="brute", action='store_true', help='Brute force only, do not enumerate')
        args.add_argument(dest='target', nargs='+', help='Target database server(s)')
        args = args.parse_args()

        # Put target input into an array
        args.target = list_targets(args.target[0])

        # Get Password if not provided
        if not args.passwords:
            args.passwords = [getpass("Enter password, or continue with null-value: ")]

        # Define default port based on dbtype
        if args.port == 0: args.port = default_port(args.dbtype)

        # Launch Main
        print("\nStarting enumdb v{}\n".format(version) + "-" * 25)
        main(args)
    except KeyboardInterrupt:
        print("\n[!] Key Event Detected...\n\n")
        exit(0)
I am aware of the documentation at http://initd.org/psycopg/docs/module.html on how connection parameters can be specified. I would like to pass password guesses into the brute class and recursively try different combinations.
PEP-8 asks that you please give classes a name starting with a capital letter, e.g. Pgsql.

You mentioned that the pgsql connect() method is not working properly, but didn't offer any diagnostics such as a stack trace.

You seem to be working too hard, given that the sqlalchemy layer has already addressed the DB portability issue quite nicely. Just assemble a connect string starting with the name of the appropriate DB package, and let sqlalchemy take care of the rest.

All your methods accept con as an argument. You really want to factor that out as the object attribute self.con.

The db_query() method apparently assumes that arguments for WHERE clauses already appear, properly quoted, in cmd. According to Little Bobby Tables' mother, it makes sense to accept query args according to the API, rather than worrying about the potential for SQL injection.
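Putting that advice together, a rough sketch (the function name, probe query, and default dbname are illustrative, not from enumdb): assemble a SQLAlchemy URL per credential guess, and use bound parameters for any user-supplied values:

from sqlalchemy import create_engine, text

def try_credentials(host, port, users, passwords, dbname='postgres'):
    # Cycle through every user/password guess; a failed login raises,
    # so a try/except per combination is all the control flow needed.
    for user in users:
        for passwd in passwords:
            url = "postgresql+psycopg2://{}:{}@{}:{}/{}".format(
                user, passwd, host, port, dbname)
            try:
                engine = create_engine(url)
                with engine.connect() as con:
                    # Bound parameters, per Bobby Tables' mother.
                    con.execute(text("SELECT :x"), {"x": 1})
                print("[*] Valid credentials {}:{}@{}".format(user, passwd, host))
                return engine
            except Exception:
                continue
    return None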

Inserting values in a table using psycopg2

I am trying to insert data into a "Dummy" table in PostgreSQL using psycopg2 and the faker library. This is a table that I have created only for learning purposes. It has only one column, Student_name, which is of type char[]. Below is my Python script:
import psycopg2
from faker import Faker

fake = Faker()
conn = psycopg2.connect(database="kreiotdb", user="****", password="*****", host="127.0.0.1", port="5432")
print("Connected Successfuly")
cur = conn.cursor()

for i in range(10):
    name = fake.name()
    cur.execute(""" INSERT INTO "Dummy" ("Student_name") VALUES (%s);""", [name])
It gives me the following error when I run the script; the connection itself is successful:
Fri Nov 02 12:16:07 gaurav ~ $ python3 /Users/gaurav/Desktop/populate.py
Connected Successfuly
Traceback (most recent call last):
File "/Users/gaurav/Desktop/populate.py", line 11, in <module>
cur.execute(""" INSERT INTO "Dummy" ("Student_name") VALUES (%s);""",[name])
psycopg2.DataError: malformed array literal: "Brent Allison"
LINE 1: INSERT INTO "Dummy" ("Student_name") VALUES ('Brent Allison...
^
DETAIL: Array value must start with "{" or dimension information.
Why is it giving me this error and what should I do?
Please help.
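The DETAIL line points at the cause: Student_name is declared as char[], a PostgreSQL array type, so Postgres tries to parse 'Brent Allison' as an array literal. A sketch reusing the names from the script above, assuming a plain string column was intended: recreate the column as varchar/text, and commit, since psycopg2 does not autocommit by default.

# recreate the column as plain text instead of a char array
cur.execute('DROP TABLE IF EXISTS "Dummy";')
cur.execute('CREATE TABLE "Dummy" ("Student_name" varchar(100));')
for i in range(10):
    cur.execute('INSERT INTO "Dummy" ("Student_name") VALUES (%s);', [fake.name()])
conn.commit()  # without this the inserts are rolled back on disconnect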
import os
import csv
import sys
import psycopg2
import json
#import xlsxwriter
#import configparser
import psycopg2.extras
import psycopg2.extensions
#import logging
#import logging.config
import datetime
import zipfile
from subprocess import call

def db_connect():
    dbconn = None
    #if conf_section in config == False:
    #    print("Given section -> {0} is not exists in conf file.".format(conf_section))
    #    return None
    dbhost = ""
    dbport = ""
    dbname = ""
    dbuser = ""
    dbpass = ""
    try:
        dbconn = psycopg2.connect(host=dbhost, port=dbport, dbname=dbname, user=dbuser, password=dbpass)
        dbconn.autocommit = True
    except Exception as e:
        print(e)
        return None
    finally:
        return dbconn

def execute_query(dbconn, query):
    nrows = cursor = None
    colnames = result = []
    try:
        cursor = dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute(query)
    except Exception as e:
        print(e)
        return (0, colnames, result)
    nrows = cursor.rowcount
    colnames = [desc[0] for desc in cursor.description]
    result = cursor.fetchall()
    #cursor.close()
    return (nrows)

def parse_csv(default_data):
    with open('key.csv') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            tid = row['TID']
            mid = row['MID']
            key = row['Exported Key ']
            kcv = row['KCV']
            serial_no = row['HarwardSerialNo']
            print("TID=" + tid + " MID=" + mid + " EXPORTED KEY=" + key + " KCV=" + kcv)
            request_data = default_data + key
            # str.replace returns a new string; assign it back or the call is a no-op
            request_data = request_data.replace(" ", "")
            print(request_data)
            cmd = "/home/siva/HSM_REQ/hsm_comms.out 192.168.5.51 4000" + request_data
            response_data = os.system(cmd)
            print(response_data)
            dbconn = db_connect()
            query = "select * from hsm_keys where serial_no ='" + serial_no + "'"
            rows = execute_query(dbconn, query)
            print(rows)
            if (rows == 0):
                query = "INSERT "
                print(query)
    return ()

def main():
    header = "0101303200"
    head_len = "1D"
    fun_code = "EE0200"
    fun_mod = "00"
    key_len = "05"
    key_spc = "081002"
    key_index = "0004"
    key_type = "0500"
    len_of_key = "10"
    default_data = header + head_len + fun_code + fun_mod + key_len + key_spc + key_index + key_type + len_of_key
    print(default_data)
    parse_csv(default_data)

if __name__ == '__main__':
    main()

Extracting data into separate Excel sheets using Python

I want to write a Python script that executes multiple SQL queries and saves the output of each query to Excel.
Suppose I have 4 SQL queries, i.e. Script1, Script2, Script3 & Script4, and I want to save the generated workbook in E:\Test, where sheet1 contains Script1's output, sheet2 contains Script2's output, sheet3 contains Script3's output, and so on. I have written a script, but it works for only one query.
Using it I am able to generate an Excel sheet with the Test name, but how do I run the remaining queries so that their output shows up in the other sheets of the same workbook?
Please help.
import psycopg2
import sys
import pprint
import pandas as pd
import os
import openpyxl.cell

COMMASPACE = ', '

def main():
    conn_string = "dbname='abc' user='qwerty' host='pqr' password='******' port='1234'"
    script1 = """
    select * From something1
    """
    script2 = """
    select * From something2
    """
    script3 = """
    select * From something3
    """
    script4 = """
    select * From something4
    """
    pprint.pprint('Making connection to the Database...')
    con1 = psycopg2.connect(conn_string)
    cur = con1.cursor()
    pprint.pprint('Execution Start')
    cur.execute(script1)  # only the first query is ever executed here
    if not cur.rowcount:
        pprint.pprint('Oops! Error Occured')
    else:
        columns = [desc[0] for desc in cur.description]
        data = cur.fetchall()
        df = pd.DataFrame(list(data), columns=columns)
        df.columns = map(str.upper, df.columns)
        writer = pd.ExcelWriter('E:\\Test.xlsx')
        df.to_excel(writer, sheet_name='Sheet1')

        def hide_column(ws, column_id):
            if isinstance(column_id, int):
                assert column_id >= 1, "Column numbers must be 1 or greater"
                column_id = openpyxl.cell.get_column_letter(column_id)
            column_dimension = ws.column_dimensions[column_id]
            column_dimension.hidden = True

        writer.save()
        print("END of extraction")

if __name__ == "__main__":
    main()
Try using pandas read_sql with SQLAlchemy:
from openpyxl import load_workbook
from sqlalchemy import create_engine
import pandas as pd

# Parameters for SQL Alchemy
ServerName = "your_Server_Name"
Database = "Your_Database"
Driver = "Your_Driver"

# Create the connection
engine = create_engine('mssql+pyodbc://' + ServerName + '/' + Database + "?" + Driver)

# reading in the dataframes
df1 = pd.read_sql_query("select * from somewhere", engine)
df2 = pd.read_sql_query("select * from somewhere_else", engine)

# Using openpyxl to write to excel sheets
file = 'Your_file_path_Here'
book = load_workbook(file)
writer = pd.ExcelWriter(file, engine='openpyxl')
writer.book = book

# now start writing them to sheets
df1.to_excel(writer, index=None, sheet_name='SQL1')
df2.to_excel(writer, index=None, sheet_name='SQL2')
writer.save()  # persist both sheets to disk
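If the target workbook does not exist yet, or on newer pandas versions where assigning writer.book is no longer supported, the same idea works with ExcelWriter alone (a sketch; the file path is a placeholder):

import pandas as pd

# Each to_excel call targets its own sheet; the with-block saves on exit.
with pd.ExcelWriter('E:\\Test.xlsx', engine='openpyxl') as writer:
    df1.to_excel(writer, index=None, sheet_name='SQL1')
    df2.to_excel(writer, index=None, sheet_name='SQL2')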
