Append columns and put one after the other - python-3.x

I have the following dataframe:
   a1  p1  a2  p2  a3  p3  ...
0   1   2   3   4   5   6  ...
1   7   8   9  10  11  12  ...
And, I want to create a dataframe like this:
    a   p
0   1   2
1   7   8
2   3   4
3   9  10
4   5   6
5  11  12
...

Do you need something like this?
import pandas as pd
import random

a = [random.randint(1, 100) for i in range(178)]
b = [random.randint(1, 100) for i in range(178)]

# Original DF
df = pd.DataFrame({"0": a, "1": b})

# New DF
cf = pd.DataFrame()
cf["Actual Value"] = df["0"]
cf["Predicted Value"] = df["1"]
cf["Error Value"] = df["0"] - df["1"]
cf["Id"] = ["Bin" + str(i + 1) for i in range(178)]
I misunderstood you with the previous one. This should fit:
import pandas as pd
import random

# Original DF: 178 bins, each a small frame with two columns
d = {}
for i in range(178):
    a = [random.randint(1, 100) for _ in range(10)]
    b = [random.randint(1, 100) for _ in range(10)]
    d["Bin " + str(i + 1)] = pd.DataFrame({"0": a, "1": b})
con = pd.concat(d, axis=1)

# Flatten the MultiIndex columns to alternating "0"/"1" labels
colum = []
for i in range(178 * 2):
    if i % 2 == 0:
        colum.append("0")
    else:
        colum.append("1")
con.columns = colum

# New DF: stack each pair of columns, adding an error and a bin id
result = []
count = 0
for i in range(0, 178 * 2, 2):
    a = con.iloc[:, [i, i + 1]].copy()  # copy to avoid SettingWithCopyWarning
    a["Error"] = a["0"] - a["1"]
    a["Bucked"] = "Bin: " + str(count)
    result.append(a)
    count = count + 1
result = pd.concat(result)
result
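For the reshape in the question itself, a shorter route is to slice the column pairs and concatenate them. A minimal sketch, assuming the columns strictly alternate a_i, p_i as shown above:

import pandas as pd

# hypothetical frame matching the question's layout
df = pd.DataFrame([[1, 2, 3, 4, 5, 6],
                   [7, 8, 9, 10, 11, 12]],
                  columns=['a1', 'p1', 'a2', 'p2', 'a3', 'p3'])

# stack each (a_i, p_i) pair under the common names 'a' and 'p'
pairs = [df[[a_col, p_col]].set_axis(['a', 'p'], axis=1)
         for a_col, p_col in zip(df.columns[::2], df.columns[1::2])]
result = pd.concat(pairs, ignore_index=True)
print(result)

This yields the rows in the desired order: both rows of (a1, p1), then (a2, p2), then (a3, p3).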

Related

Python(AI Constraint satisfaction problem) Fitting square and/or rectangular (2d) tiles onto a rectangular ground

I have to arrange and/or fit 2D tiles onto a 2D square or rectangular plane with an AI algorithm, using a Python program. Each tile has a length and a width. For example, if the plane is 4x3 and the set of tiles is
S = {(2,3), (1,2), (2,2)}
the tiles can be rotated 90 degrees in order to fit the plane.
input:
first line contains the length and width of the plane
second line contains the number of tiles
then the length and width of each subsequent tile
the inputs are tab separated, e.g.
4 3
3
2 3
1 2
2 2
output, e.g.
1 1 2 2
1 1 3 3
1 1 3 3
I have trouble solving this, as I have to use only standard Python libraries: no NumPy and no CSP library.
Edit 2:
My code so far is below. I can't figure out how to add the algorithm without a CSP library, or how to generate the grid.
from sys import stdin

a = stdin.readline()
x = a.split()
rectangular_plane = [[0] * int(x[0]) for i in range(int(x[1]))]
num_of_rectangles = stdin.readline()
r_widths = []
r_lengths = []
for l in range(int(num_of_rectangles)):
    b = stdin.readline()
    y = b.split()
    r_lengths.insert(l, int(y[0]))  # store numeric dimensions, not strings
    r_widths.insert(l, int(y[1]))
I've solved the task with a backtracking approach, without any non-standard modules.
Try it online!
import sys

nums = list(map(int, sys.stdin.read().split()))
pw, ph = nums[0:2]                      # plane width and height
ts = list(zip(nums[3::2], nums[4::2]))  # tile (width, height) pairs
assert len(ts) == nums[2]

# quick pruning: total tile area must equal the plane area
if sum([e[0] * e[1] for e in ts]) != pw * ph:
    print('Not possible!')
else:
    def Solve(*, it = 0, p = None):
        if p is None:
            p = [[0] * pw for i in range(ph)]
        if it >= len(ts):
            for e0 in p:
                for e1 in e0:
                    print(e1, end = ' ')
                print()
            return True
        # try the tile as given and rotated 90 degrees
        for tw, th in [(ts[it][0], ts[it][1]), (ts[it][1], ts[it][0])]:
            zw = [0] * tw
            ow = [it + 1] * tw
            for i in range(ph - th + 1):
                for j in range(pw - tw + 1):
                    if all(p[k][j : j + tw] == zw for k in range(i, i + th)):
                        # place the tile, recurse, and undo the placement on failure
                        for k in range(i, i + th):
                            p[k][j : j + tw] = ow
                        if Solve(it = it + 1, p = p):
                            return True
                        for k in range(i, i + th):
                            p[k][j : j + tw] = zw
        return False
    if not Solve():
        print('Not possible!')
Example input:
4 3
3
2 3
1 2
2 2
Output:
1 1 2 2
1 1 3 3
1 1 3 3
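To test it locally, pipe the example input into the script; assuming the code is saved as tiles.py (hypothetical name):

printf '4 3\n3\n2 3\n1 2\n2 2\n' | python3 tiles.py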

Error: TypeError: cannot perform reduce with flexible type

I am using Python 3.7. Below is the code in which I perform an operation along the rows: I want the mean of the data along the rows, but I get an error. I am new to NumPy and Python, and I am reading the data from a text file.
My code is:
import numpy as np

def getIndexFromDatetime(date_from, date_to):
    '''date_from = [2, 10] : 10 o'clock of day 2'''
    if date_from[1] > 24 or date_to[1] > 24: print('error')
    start = (date_from[0] - 1) * 48 + date_from[1] * 2
    end = (date_to[0] - 1) * 48 + date_to[1] * 2
    return [start, end]

def is_num(s):
    return s.replace(',', '').replace('.', '').replace('-', '').isnumeric()

def get_dataset(fpath):
    with open(fpath, 'r') as f:
        cnt = 0
        DataWeather = {}
        header = []
        dtime = []
        for line in f:
            terms = line.split('\t')
            if cnt == 0: header1 = terms
            if cnt == 1: header2 = terms
            cnt += 1
            if cnt == 2:
                for i in range(len(header1)):
                    header.append(header1[i] + header2[i])
                for i in range(len(terms)):
                    DataWeather[header[i]] = []
            if cnt > 2:
                for i in range(len(terms)):
                    if is_num(terms[i]):
                        DataWeather[header[i]].append(float(terms[i]))
                    else:
                        DataWeather[header[i]].append(terms[i])
        for i in range(len(DataWeather[header[0]])):
            dtime.append(DataWeather[header[0]][i] + ' ' + DataWeather[header[1]][i])
    return DataWeather, header

def get_data(dataset, header, idx):
    y = dataset[header][idx[0]:idx[1]]
    return y

data_dir = 'weather_data'
month = 3
day = list(range(1, 10))
header_idx = [2, 3, 4, 5, 7, 16]
for d in day:
    print(d)
    dtime_from = [d, 9]
    dtime_to = [d, 18]
    dtime_idx = getIndexFromDatetime(dtime_from, dtime_to)
    fpath = '{0}/2019-{1:02}.txt'.format(data_dir, month)
    dataset, header = get_dataset(fpath)
    for h in header_idx:
        print(fpath)
        print(header[h], dtime_from, dtime_to, dtime_idx)
        data = get_data(dataset, header[h], dtime_idx)
        #data = list(map(float, np.array(data)))
        #data = map(np.array(data, dtype=np.float))
        print(data)
        print(np.mean(data))
I am getting the following error:
ret = umr_sum(arr, axis, dtype, out, keepdims)
TypeError: cannot perform reduce with flexible type
I also tried functions like map and list, as commented in the code, but it still gives an error when converting string to float.
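For reference, this error means np.mean received a NumPy array of strings (a "flexible" dtype). A likely cause here: if even one entry in a selected column fails the is_num test (say, a timestamp like '10:30'), it is stored as a string, the whole list stays mixed, and NumPy refuses to reduce it. A minimal sketch of one way around it, assuming non-numeric entries can simply be skipped:

def to_floats(values):
    # keep only the entries that parse as floats
    out = []
    for v in values:
        try:
            out.append(float(v))
        except (TypeError, ValueError):
            pass  # skip timestamps, flags, and other non-numeric strings
    return out

clean = to_floats(data)
if clean:
    print(np.mean(clean))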

Looping over pandas DataFrame

I have a weird issue: the result doesn't change between iterations. The code is the following:
import pandas as pd
import numpy as np

X = np.arange(10, 100)
Y = X[::-1]
Z = np.array([X, Y]).T
df = pd.DataFrame(Z, columns=['col1', 'col2'])
dif = df['col1'] - df['col2']
for gap in range(100):
    Up = dif > gap
    Down = dif < -gap
    df.loc[Up, 'predict'] = 'Up'
    df.loc[Down, 'predict'] = 'Down'
    df_result = df.dropna()
    Total = df.shape[0]
    count = df_result.shape[0]
    ratio = count / Total
    print(f'Total: {Total}; count: {count}; ratio: {ratio}')
The result is always
Total: 90; count: 90; ratio: 1.0
when it shouldn't be.
Found the root of the problem 5 minutes after posting this question: I just needed to reset the DataFrame to the original in each iteration.
import pandas as pd
import numpy as np

X = np.arange(10, 100)
Y = X[::-1]
Z = np.array([X, Y]).T
df = pd.DataFrame(Z, columns=['col1', 'col2'])
df2 = df.copy()  # added this line to preserve the original df
dif = df['col1'] - df['col2']
for gap in range(100):
    df = df2.copy()  # reset the altered df back to the original
    Up = dif > gap
    Down = dif < -gap
    df.loc[Up, 'predict'] = 'Up'
    df.loc[Down, 'predict'] = 'Down'
    df_result = df.dropna()
    Total = df.shape[0]
    count = df_result.shape[0]
    ratio = count / Total
    print(f'Total: {Total}; count: {count}; ratio: {ratio}')
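The underlying issue is that the 'predict' column written in earlier iterations persists, so rows labelled at small gaps are never NaN again and dropna() never drops anything. Instead of copying the whole frame, a lighter alternative sketch is to clear just that column at the top of the loop:

for gap in range(100):
    # discard the labels written in the previous pass, if any
    df = df.drop(columns='predict', errors='ignore')
    Up = dif > gap
    Down = dif < -gap
    df.loc[Up, 'predict'] = 'Up'
    df.loc[Down, 'predict'] = 'Down'
    # ... rest of the loop unchanged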

Optimizing using Pandas Data Frame

I have the following function that loads a CSV into a DataFrame and then does some calculations. It takes about 4-5 minutes to run on a CSV with a little over 100,000 lines. I was hoping there is a faster way.
def calculate_adeck_errors(in_file):
    print(f'Starting Data Calculations: {datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")}')
    pd.set_option('display.max_columns', 12)
    # read in the raw csv
    adeck_df = pd.read_csv(in_file)
    # extract only the carq items and remove duplicates
    carq_data = adeck_df[(adeck_df.MODEL == 'CARQ') & (adeck_df.TAU == 0)].drop_duplicates(keep='last')
    # remove carq items from original
    final_df = adeck_df[adeck_df.MODEL != 'CARQ']
    row_list = []
    for index, row in carq_data.iterrows():
        position_time = row['POSDATETIME']
        for index, arow in final_df.iterrows():
            if arow['POSDATETIME'] == position_time:
                # match, so do calculations
                storm_id = arow['STORMID']
                model_base_time = arow['MODELDATETIME']
                the_hour = arow['TAU']
                the_model = arow['MODEL']
                point1 = float(row['LAT']), float(row['LON'])
                point2 = float(arow['LAT']), float(arow['LON'])
                if arow['LAT'] == 0.0:
                    dist_error = None
                else:
                    dist_error = int(round(haversine(point1, point2, miles=True)))
                if arow['WIND'] != 0:
                    wind_error = int(abs(int(row['WIND']) - int(arow['WIND'])))
                else:
                    wind_error = None
                if arow['PRES'] != 0:
                    pressure_error = int(abs(int(row['PRES']) - int(arow['PRES'])))
                else:
                    pressure_error = None
                lat_carq = row['LAT']
                lon_carq = row['LON']
                lat_model = arow['LAT']
                lon_model = arow['LON']
                wind_carq = row['WIND']
                wind_model = arow['WIND']
                pres_carq = row['PRES']
                pres_model = arow['PRES']
                row_list.append([storm_id, model_base_time, the_model, the_hour,
                                 lat_carq, lon_carq, lat_model, lon_model, dist_error,
                                 wind_carq, wind_model, wind_error,
                                 pres_carq, pres_model, pressure_error])
    result_df = pd.DataFrame(row_list)
    result_df = result_df.where((pd.notnull(result_df)), None)
    result_cols = ['StormID', 'ModelBasetime', 'Model', 'Tau',
                   'LatCARQ', 'LonCARQ', 'LatModel', 'LonModel', 'DistError',
                   'WindCARQ', 'WindModel', 'WindError',
                   'PresCARQ', 'PresModel', 'PresError']
    result_df.columns = result_cols

calculate_adeck_errors(infile)
To clarify what I'm doing:
1. The CARQ entries are the control (actual).
2. The other models are the guesses.
3. I'm comparing the control (CARQ) to the guesses to see what their errors are.
4. The basis of the comparison is MODELBASETIME = POSBASETIME.
5. A sample file I'm processing is here: http://vortexweather.com/downloads/adeck/aal062018.csv
I was hoping there is a faster way than what I'm doing, or another pandas method besides iterrows.
Many thanks for any suggestion.
Bryan
This code takes about 10 seconds to run your entire dataset!
The code looks very similar to what you have written, except that all of the operations within main_function have been vectorized. See Fast, Flexible, Easy and Intuitive: How to Speed Up Your Pandas Projects.
2018-09-13_adeck_error_calculations.ipynb
import pandas as pd
import numpy as np
import datetime
from haversine import haversine

def main_function(df, row):
    """
    The main difference here is that everything is vectorized
    Returns: DataFrame
    """
    df_new = pd.DataFrame()
    df_storage = pd.DataFrame()
    pos_datetime = df.POSDATETIME.isin([row['POSDATETIME']])  # creates a Boolean map
    array_len = len(pos_datetime)
    new_index = pos_datetime.index
    df_new['StormID'] = df.loc[pos_datetime, 'STORMID']
    df_new['ModelBaseTime'] = df.loc[pos_datetime, 'MODELDATETIME']
    df_new['Model'] = df.loc[pos_datetime, 'MODEL']
    df_new['Tau'] = df.loc[pos_datetime, 'TAU']
    # Distance
    df_new['LatCARQ'] = pd.DataFrame(np.full((array_len, 1), row['LAT']), index=new_index).loc[pos_datetime, 0]
    df_new['LonCARQ'] = pd.DataFrame(np.full((array_len, 1), row['LON']), index=new_index).loc[pos_datetime, 0]
    df_new['LatModel'] = df.loc[pos_datetime, 'LAT']
    df_new['LonModel'] = df.loc[pos_datetime, 'LON']
    def calc_dist_error(row):
        return round(haversine((row['LatCARQ'], row['LonCARQ']), (row['LatModel'], row['LonModel']), miles=True)) if row['LatModel'] != 0.0 else None
    df_new['DistError'] = df_new.apply(calc_dist_error, axis=1)
    # Wind
    df_new['WindCARQ'] = pd.DataFrame(np.full((array_len, 1), row['WIND']), index=new_index).loc[pos_datetime, 0]
    df_new['WindModel'] = df.loc[pos_datetime, 'WIND']
    df_storage['row_WIND'] = pd.DataFrame(np.full((array_len, 1), row['WIND']), index=new_index).loc[pos_datetime, 0]
    df_storage['df_WIND'] = df.loc[pos_datetime, 'WIND']
    def wind_error_calc(row):
        return (row['row_WIND'] - row['df_WIND']) if row['df_WIND'] != 0 else None
    df_new['WindError'] = df_storage.apply(wind_error_calc, axis=1)
    # Air Pressure
    df_new['PresCARQ'] = pd.DataFrame(np.full((array_len, 1), row['PRES']), index=new_index).loc[pos_datetime, 0]
    df_new['PresModel'] = df.loc[pos_datetime, 'PRES']
    df_storage['row_PRES'] = pd.DataFrame(np.full((array_len, 1), row['PRES']), index=new_index).loc[pos_datetime, 0]
    df_storage['df_PRES'] = df.loc[pos_datetime, 'PRES']
    def pres_error_calc(row):
        return abs(row['row_PRES'] - row['df_PRES']) if row['df_PRES'] != 0 else None
    df_new['PresError'] = df_storage.apply(pres_error_calc, axis=1)
    del(df_storage)
    return df_new

def calculate_adeck_errors(in_file):
    """
    Returns: DataFrame
    """
    print(f'Starting Data Calculations: {datetime.datetime.now().strftime("%I:%M:%S%p on %B %d, %Y")}')
    pd.set_option('max_columns', 20)
    pd.set_option('max_rows', 300)
    # read in the raw csv
    adeck_df = pd.read_csv(in_file)
    adeck_df['MODELDATETIME'] = pd.to_datetime(adeck_df['MODELDATETIME'], format='%Y-%m-%d %H:%M')
    adeck_df['POSDATETIME'] = pd.to_datetime(adeck_df['POSDATETIME'], format='%Y-%m-%d %H:%M')
    # extract only the carq items and remove duplicates
    carq_data = adeck_df[(adeck_df.MODEL == 'CARQ') & (adeck_df.TAU == 0)].drop_duplicates(keep='last')
    print('Len carq_data: ', len(carq_data))
    # remove carq items from original
    final_df = adeck_df[adeck_df.MODEL != 'CARQ']
    print('Len final_df: ', len(final_df))
    df_out_new = pd.DataFrame()
    for index, row in carq_data.iterrows():
        test_df = main_function(final_df, row)  # function call
        df_out_new = df_out_new.append(test_df, sort=False)
    df_out_new = df_out_new.reset_index(drop=True)
    df_out_new = df_out_new.where((pd.notnull(df_out_new)), None)
    print(f'Finishing Data Calculations: {datetime.datetime.now().strftime("%I:%M:%S%p on %B %d, %Y")}')
    return df_out_new

in_file = 'aal062018.csv'
df = calculate_adeck_errors(in_file)
>>>Starting Data Calculations: 02:18:30AM on September 13, 2018
>>>Len carq_data: 56
>>>Len final_df: 137999
>>>Finishing Data Calculations: 02:18:39AM on September 13, 2018
print(len(df))
>>>95630
print(df.head(20))
Please don't forget to check the accepted solution. Enjoy!
Looks like you are creating two dataframes out of the same dataframe and then processing them. Two things may cut your time.
First, you are iterating over both dataframes and checking for a condition:
for _, row in carq_data.iterrows():
    for _, arow in final_df.iterrows():
        if arow['POSDATETIME'] == row['POSDATETIME']:
            # do something by using both tables
This is essentially an implementation of a join. You are joining carq_data with final_df on 'POSDATETIME'.
As a first step, you should merge the tables:
merged = carq_data.merge(final_df, on=['POSDATETIME'])
At this point you will get multiple rows for each matching 'POSDATETIME'. In the example below, let's assume column b plays the role of POSDATETIME:
>>> a
   a   b
0  1  11
1  1  33
>>> b
   a  b
0  1  2
1  1  3
2  1  4
>>> merged = a.merge(b, on=['a'])
>>> merged
   a  b_x  b_y
0  1   11    2
1  1   11    3
2  1   11    4
3  1   33    2
4  1   33    3
5  1   33    4
Now, to do your conditional calculations, you can use the apply() function.
First, define a function:
def calc_dist_error(row):
    # with the real data, pass two (lat, lon) tuples to haversine()
    return int(round(haversine(row['b_x'], row['b_y'], miles=True))) if row['a'] != 0.0 else None
Then apply it to every row:
merged['dist_error'] = merged.apply(calc_dist_error, axis=1)
Continuing my small example:
>>> merged['c'] = [1, 0, 0, 0, 2, 3]
>>> merged
   a  b_x  b_y  c
0  1   11    2  1
1  1   11    3  0
2  1   11    4  0
3  1   33    2  0
4  1   33    3  2
5  1   33    4  3
>>> def foo(row):
...     return row['b_x'] - row['b_y'] if row['c'] != 0 else None
...
>>> merged['dist_error'] = merged.apply(foo, axis=1)
>>> merged
   a  b_x  b_y  c  dist_error
0  1   11    2  1         9.0
1  1   11    3  0         NaN
2  1   11    4  0         NaN
3  1   33    2  0         NaN
4  1   33    3  2        30.0
5  1   33    4  3        29.0
This should help you reduce run time (see also this for how to check using %timeit). Hope this helps!
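As a rough sketch of such a check, in an IPython or Jupyter session (using the toy merged frame and foo from above):

%timeit merged.apply(foo, axis=1)
%timeit [foo(row) for _, row in merged.iterrows()]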

Python - Sorting

So I know how to import a text file and sort numbers such as:
1
6
4
6
9
3
5
But I don't know how to sort data that looks like:
Merchant_9976 20122
Merchant_9977 91840
Merchant_9978 92739
Merchant_9979 97252
Merchant_9980 76885
Merchant_9981 67835
Merchant_9982 42201
Merchant_9983 47463
Here's my code so far:
import time

def sort_slow(seq):
    """Insertion sort: swap each item left until it is in place.

    :param seq: mutable sequence to sort in place
    :return: the sorted sequence
    """
    for i in range(1, len(seq)):
        j = i
        while j > 0 and seq[j - 1] > seq[j]:
            seq[j - 1], seq[j] = seq[j], seq[j - 1]
            j -= 1
    return seq

def main():
    fileName = str(input('Please enter a filename: '))
    file = open(fileName)
    sort1 = []
    for lines in file:
        sort1.append(int(lines.strip()))
    # time.clock() was removed in Python 3.8; perf_counter() is the modern equivalent
    starting = time.perf_counter()
    print(starting)
    sort_slow(sort1)
    print(sort1)
    elapsed = time.perf_counter() - starting
    print(elapsed)

main()
lines = file.readlines()
ar = [line.split() for line in lines]  # split each line into [name, number]
ar.sort(key=lambda x: int(x[1]))       # int() gives a numeric sort on the second column
ar now contains all the lines in the file, sorted on the second column. (Note that in Python 3, map() returns an iterator with no .sort() method, and list.sort() returns None, so don't reassign its result.)
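Put together, a minimal end-to-end sketch, assuming a file named merchants.txt (hypothetical) with two whitespace-separated columns per line:

with open('merchants.txt') as f:
    rows = [line.split() for line in f if line.strip()]
rows.sort(key=lambda r: int(r[1]))  # numeric sort on the second column
for name, value in rows:
    print(name, value)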
