SQLAlchemy - insert from a result object? - python-3.x

The query below produces a result set in the variable 'result'. I need to insert that into iconndest (the new MySQL server), but I don't know how to insert the query result into the new table. Conceptually I just want to do INSERT INTO DB.TBL SELECT * FROM RESULT, but I am not sure how.
import mysql.connector
import pandas as pd
from sqlalchemy import create_engine
import multiprocessing as mp
from multiprocessing import cpu_count

try:
    engine_source = create_engine("CONN STRING")
    iconn = engine_source.connect()
    result = iconn.execute('SELECT QUERY')
    print('EXTRACT COMPLETE')

    engine_dest = create_engine("CONN STRING")
    iconndest = engine_dest.connect()
    iconndest.execute('SELECT * from ')

    engine_source.dispose()
    engine_dest.dispose()
except Exception as e:
    print('extract: ' + str(e))

What you describe is very simple if we use .mappings() to convert the list of Row objects to a list of RowMapping objects when we retrieve the results. RowMapping objects behave like dict objects when passed as parameter values:
import sqlalchemy as sa

source_engine = sa.create_engine("mssql+pyodbc://scott:tiger^5HHH@mssql_199")
destination_engine = sa.create_engine("sqlite://")

with source_engine.begin() as conn:
    results = (
        conn.exec_driver_sql(
            """\
            SELECT 1 AS id, N'foo' AS txt
            UNION ALL
            SELECT 2 AS id, N'bar' AS txt
            """
        )
        .mappings()
        .all()
    )
print(results)
# [{'id': 1, 'txt': 'foo'}, {'id': 2, 'txt': 'bar'}]

destination_engine.echo = True
with destination_engine.begin() as conn:
    conn.exec_driver_sql("CREATE TABLE t (id int, txt varchar(10))")
    conn.execute(
        sa.text("INSERT INTO t (id, txt) VALUES (:id, :txt)"), results
    )
"""SQL emitted:
INSERT INTO t (id, txt) VALUES (?, ?)
[generated in 0.00038s] ((1, 'foo'), (2, 'bar'))
"""

Related

adding key:value from dictionary to sqlite3 db and retrieving them back

I am saving sequences and the ids associated with them as two columns in a sqlite3 DB, where the sequence is one column and the ids string is the other. I have a problem retrieving them from the database.
The dictionary maps each unique sequence to a list of ids (unique_sequence = [list of ids]).
Each sequence is a string of roughly 7000 characters or fewer, and the list of ids can be up to 1 million characters.
import sys
from Bio import SeqIO
from Bio.Seq import Seq
import time
import sqlite3

conn = sqlite3.connect('Sausql_012419.db')
c = conn.cursor()
c.execute("create table Sau (sequence text, list_of_ids text)")
for record in sequences_dict_d:
    c.execute("insert into Sau values (?,?)", (record, '._/'.join(sequences_dict_d[record])))
conn.commit()
c.execute("SELECT COUNT(*) FROM Saureus_panproteome")
sql_count = c.fetchall()
print("saved sql database of {} proteins in --- {:.3f} seconds ---".format(sql_count[0][0], time.time() - start_time))
c.close()
conn.close()

# retrieval: exact sequence match
for record in SeqIO.parse(queryfile, _format):
    conn = sqlite3.connect('Sausql_012419.db')
    c = conn.cursor()
    c.execute('select list_of_ids from Sau where sequence = str(record.seq)')
    print(c.fetchall())  # or do something with the list_of_ids
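The retrieval query embeds str(record.seq) inside the SQL string literal, so SQLite compares against that literal text rather than the actual sequence. A minimal sketch of a parameterized lookup, assuming the Sau table and the '._/' separator used above, run inside the SeqIO loop:

import sqlite3

conn = sqlite3.connect('Sausql_012419.db')
c = conn.cursor()

# pass the sequence as a bound parameter instead of embedding it in the SQL text
c.execute("SELECT list_of_ids FROM Sau WHERE sequence = ?", (str(record.seq),))
row = c.fetchone()
if row is not None:
    # split the stored string back into the original list of ids
    ids = row[0].split('._/')
    print(ids)

conn.close()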

Check null values in data fetched from SQL database

I want to store data fetched from the database in numpy arrays, and I want to make sure that no null value (None) ends up in a numpy array (doing so throws an error anyway). I have tried the following, but it does not work: for some reason NullValueCheck() always returns True. How can I detect null values and do something about them?
import numpy as np
import pyodbc as odbc

cnxn = odbc.connect(conn_string)
cursor = cnxn.cursor()
cursor.execute("""SELECT ID, BuildingID, Title FROM Something""")
rows = cursor.fetchall()
cnxn.close()

ID = [i[0] for i in rows]
buildingID = [i[1] for i in rows]
title = [i[2] for i in rows]

def NullValueCheck(rows):
    if any(elem is None for elem in rows[0]):
        return True
    else:
        return False

if NullValueCheck(rows):
    ID_array = np.fromiter(ID, dtype=np.int32)
Edit:
It turns out that I don't have to write all that code. I can achieve with a pandas DataFrame what I wanted to achieve with numpy arrays.
import pandas as pd
import pyodbc as odbc
cnxn = odbc.connect(conn_string)
df = pd.io.sql.read_sql("""SELECT ID, BuildingID, Title FROM Something""", cnxn)
I've found this is easiest to address in your source SQL. COALESCE is helpful here:
df = pd.io.sql.read_sql("""SELECT ID, COALESCE(BuildingID, 0) AS BuildingID, Title FROM Something""", cnxn)
This will return 0 if the value of BuildingID is NULL. Different SQL databases have functions for specific NULL checking (ISNULL in SQL Server, IFNULL in MySQL, for example), but COALESCE is the most cross-database compatible.
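Alternatively, you can check for or handle NULLs on the pandas side after reading the unmodified query. A small sketch, using the df from the edit above and the column names from the question:

# True for any column that still contains NULL/NaN values
print(df.isnull().any())

# or drop rows containing NULLs before converting to a numpy array
clean = df.dropna(subset=['BuildingID'])
ID_array = clean['ID'].to_numpy(dtype='int32')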

how to translate database tables (sqlite3) to a numpy array

A database contains 80 tables, and every table is an 18000x4 matrix. How can I translate these into an 80x18000x4 numpy array?
the db : https://drive.google.com/open?id=0B3bNSNFik5wGdm1DYnJwNHBJMVU
I wrote a function. Do you have a better idea?
import sqlite3 as sql
import os
import numpy as np
import pandas as pd

def db_to_array(db, r, c):
    db = sql.connect(db)
    cursor = db.cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    tables = cursor.fetchall()
    wd = os.getcwd()
    if not os.path.exists(wd + '/temp/'):
        os.makedirs(wd + '/temp/')
    for table_name in tables:
        table_name = table_name[0]
        table = pd.read_sql_query("SELECT * from %s" % table_name, db)
        table.to_csv(wd + '/temp/' + table_name + '.csv', index_label='index')
    ss = os.listdir(wd + '/temp/')
    print(len(ss))
    dd = np.empty([len(ss), r, c])
    for i in range(len(ss)):
        ddd = np.array(pd.read_csv(wd + '/temp/' + ss[i]))
        print(i)
        print(ddd.shape)
        dd[i, :, :] = ddd[:, 0:r]
    return dd
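A sketch of a more direct approach that skips the CSV round trip, assuming every table really is 18000x4 (table order follows sqlite_master):

import sqlite3
import numpy as np
import pandas as pd

def db_to_array(db_path, rows=18000, cols=4):
    conn = sqlite3.connect(db_path)
    names = [n for (n,) in conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table'")]
    out = np.empty((len(names), rows, cols))
    for i, name in enumerate(names):
        # read each table straight into a DataFrame, then into the 3-D array
        df = pd.read_sql_query("SELECT * FROM %s" % name, conn)
        out[i] = df.to_numpy()[:rows, :cols]
    conn.close()
    return out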

SQLAlchemy: column_property jsonb operation

My Test table has a JSONB column data:
class Test(Base):
    __tablename__ = 'test'
    data = Column(JSONB)
A typical document has two lists:
{'percentage': [10, 20, 50, 80, 90],
 'age': [1.21, 2.65, 5.23, 8.65, 11.78]}
With a column_property I would like to combine these two lists so they are available as a dictionary. In plain Python this is straightforward:
dict(zip(Test.data['percentage'], Test.data['age']))
But with a column_property:
Test.data_dict = column_property(
    dict(zip(Test.data['percentage'], Test.data['age']))
)
this gives:
AttributeError: 'dict' object has no attribute 'label'
Is this actually possible, and how should it be done?
Does this solve your problem?
@property
def data_dict(self):
    return dict(zip(self.data['percentage'], self.data['age']))
In PostgreSQL it would be something like this (for PostgreSQL >= 9.4):
SELECT json_object(array_agg(ARRAY[p,a]))
FROM (
SELECT unnest(ARRAY(select jsonb_array_elements_text(data->'percentage'))) p,
unnest(ARRAY(select jsonb_array_elements_text(data->'age'))) a
FROM test
) x;
In SQLAlchemy:
from sqlalchemy import Column, select, alias, text
from sqlalchemy.orm import column_property
from sqlalchemy.dialects.postgresql import JSONB

class Test(Base):
    __tablename__ = 'test'
    data = Column(JSONB)
    data_dict = column_property(
        select([text('json_object(array_agg(ARRAY[p,a]))')]).select_from(
            alias(select([
                text("unnest(ARRAY(select jsonb_array_elements_text(data->'percentage'))) p, \
                      unnest(ARRAY(select jsonb_array_elements_text(data->'age'))) a")
            ]).select_from(text('test')))
        )
    )
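A quick usage sketch, assuming a configured Session (hypothetical here); data_dict comes back as the JSON text built by json_object, keyed by percentage:

t = session.query(Test).first()
print(t.data)       # {'percentage': [10, 20, ...], 'age': [1.21, 2.65, ...]}
print(t.data_dict)  # e.g. {"10": "1.21", "20": "2.65", ...}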

How to delete data based on rowid

Using Python, how do I delete data based on lastrowid? The code I have deletes all the rows.
CODE:
import re
import sys
import difflib
import sqlite3

def main():
    while True:
        name = input('Please Type your Question: ').lower().split()
        name2 = name[:]
        import sqlite3
        for item in name2:  # break
            conn = sqlite3.connect("foods.db")
            cursor = conn.cursor()
            cursor.execute("INSERT INTO INPUT33 (NAME) VALUES (?);", (name2,))
            cursor.execute("select MAX(rowid) from [input33];")
            conn.commit()
            for rowid in cursor: break
            for elem in rowid:
                m = elem
            print(m)
            cursor.execute("DELETE FROM INPUT33 (NAME) WHERE NAME = name")
To get the last inserted rowid, use the cursor's lastrowid attribute.
To delete a record with a specific rowid, use that column in the WHERE condition:
cursor.execute("INSERT INTO input33(Name) VALUES(?)", ("whatever",))
rowid = cursor.lastrowid
cursor.execute("DELETE FROM input33 WHERE rowid = ?", (rowid,))
