missing required positional arguments for a named tuple - python

I am a newbie in programming. I am writing a Python script that extracts data from a PDF, and I am having trouble with a named tuple: I am not able to provide its arguments. I think my logic is not correct, whether it is the indentation, the sequence, or something else.
I am hoping to get some explanation of why I am getting this error.
This is a sample of my PDF (I had to block out some sensitive info).
This is what I am trying to achieve
I am getting this error:
Traceback (most recent call last):
File "/Users/jeff/PycharmProjects/extractFreightInvoice/main.py", line 79, in <module>
lines.append(Line('invDate, invNumber, poNumber, contactName, jobNumber, '
TypeError: <lambda>() missing 11 required positional arguments: 'invNumber', 'poNumber', 'contactName', 'jobNumber', 'jobName', 'invDescription', 'siteAddress', 'invItemsDesc', 'invItemsQty', 'invItemsUnitPrice', and 'invItemsAmount'
My code is as per below:
# This is a pdf extractor
import re
import pdfplumber
import pandas as pd
from collections import namedtuple
Line = namedtuple('Line', 'invDate, invNumber, poNumber, contactName, jobNumber, '
                          'jobName, invDescription, siteAddress, invItemsDesc, invItemsQty, invItemsUnitPrice, '
                          'invItemsAmount')
invDate_re = re.compile(r'(Clever Core NZ Limited\s)(\d{1,2}/\d{1,2}/\d{4})(.+)')
invNumber_re = re.compile(r'(IN\d{6})')
poNumber_re = re.compile(r'\d{4}')
contactNameJordan_re = re.compile(r'(Jordan\s.+)')
contactNameLorna_re = re.compile(r'(Lorna\s.+)')
jobNumber_re = re.compile(r'(J[\d]{6})')
jobName_re = re.compile(r'(Job Name)')
invDescription_re = re.compile(r'(Invoice Description)')
siteAddress_re = re.compile(r'(Site address.*)')
colHeading_re = re.compile(r'((Description)(.* Quantity.* Unit Price.*))')
invItems_re = re.compile(
    r'(.+) (([0-9]*[.])?[0-9]+) (([0-9]*[.])?[0-9]+) (\d*\?\d+|\d{1,3}(,\d{3})*(\.\d+)?)')
# quoteLines_re = re.compile(r'(.+)(:\s*)(.+)')
# clevercorePriceLine_re = re.compile(r'(.* First .*\s?)(-\s?.*\$)(\s*)(.+)')
file = 'CombinedInvoicePdf.pdf'
lines = []
with pdfplumber.open(file) as myPdf:
    for page in myPdf.pages:
        text = page.extract_text()
        lines = text.split('\n')
        index = 0
        for i in range(len(lines)):
            line = lines[i]
            invDateLine = invDate_re.search(line)
            invNumberLine = invNumber_re.search(line)
            poNumberLine = poNumber_re.search(line)
            contactNameJordanLine = contactNameJordan_re.search(line)
            contactNameLornaLine = contactNameLorna_re.search(line)
            jobNumberLine = jobNumber_re.search(line)
            jobNameLine = jobName_re.search(line)
            invDescriptionLine = invDescription_re.search(line)
            colHeadingLine = colHeading_re.search(line)
            siteAddressLine = siteAddress_re.search(line)
            invItemsLine = invItems_re.search(line)
            if invDateLine:
                invDate = invDateLine.group(2)
            if invNumberLine:
                invNumber = invNumberLine.group(1)
            if poNumberLine and len(line) == 4:
                poNumber = poNumberLine.group(0)
            if contactNameJordanLine:
                contactName = 'Jordan Michael'
            if contactNameLornaLine:
                contactName = 'Lorna Tolentin'
            if jobNumberLine:
                jobNumber = lines[i]
            if jobNameLine:
                jobName = (lines[i + 1])
            if invDescriptionLine:
                invDescription = lines[i + 1]
            if siteAddressLine:
                if len(lines[i + 1]) > 0 and len(lines[i + 1]) == 0:
                    siteAddress = lines[i + 1]
                elif len(lines[i + 1]) > 0 and len(lines[i + 1]) > 0:
                    siteAddress = lines[i + 1] + ' ' + lines[i + 2]
                else:
                    siteAddress = 'check invoice'
            if invItemsLine and invItemsLine[2] != '06':
                invItemsDesc = invItemsLine.group(1)
                invItemsQty = invItemsLine.group(2)
                invItemsUnitPrice = invItemsLine.group(4)
                invItemsAmount = invItemsLine.group(6)
            lines.append(Line('invDate, invNumber, poNumber, contactName, jobNumber, '
                              'jobName, invDescription, siteAddress, invItemsDesc, invItemsQty, invItemsUnitPrice, '
                              'inItemsAmount'))
df = pd.DataFrame(lines)
print(df)
print(df.head())
df.to_csv('freightCharges.csv')

Line is a tuple subclass with named fields. You need to fill them with separate arguments, not a single string:
lines.append(Line('invDate', 'invNumber', 'poNumber', 'contactName', 'jobNumber', 'jobName', 'invDescription',
                  'siteAddress', 'invItemsDesc', 'invItemsQty', 'invItemsUnitPrice', 'invItemsAmount'))
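In the asker's loop the intent is presumably to pass the values collected by the regexes rather than their names. A minimal sketch of that call, meant as a drop-in for the append inside the loop above and assuming all twelve variables have been assigned for the current invoice (a separate records list is used so parsed rows are not appended back onto the list of text lines):

records = []  # created once, before opening the PDF
# ... the parsing loop from the question runs here, then for each parsed item row:
records.append(Line(invDate, invNumber, poNumber, contactName, jobNumber,
                    jobName, invDescription, siteAddress, invItemsDesc,
                    invItemsQty, invItemsUnitPrice, invItemsAmount))

df = pd.DataFrame(records, columns=Line._fields)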

Related

problem with moving data from device to host

Hi, I'm trying to create an AI inspired by NEAT, but I ran into a problem: after performing the calculation, the data can't be moved from the device back to the host.
I tried calling cuda.synchronize() before moving the data, but then the error already occurs on that line instead.
I have used these CUDA features before and they worked without problems until now.
I'm attaching the code and the error.
Can you please help me?
code:
import os
# needs to appear before cuda import
os.environ["NUMBA_ENABLE_CUDASIM"] = "0"
# set to "1" for more debugging, but slower performance
os.environ["NUMBA_CUDA_DEBUGINFO"] = "0"

from numba import cuda
import numpy as np
from random import uniform
from pprint import pprint


@cuda.jit(device=True)
def LeakyRelu(x):
    return max(x * 0.1, x)


@cuda.jit
def Calculate(set_io, many_neurons, many_inputs, reindex_io, memory_io, weights_io, biases_io, nets_info_io, auxmemort_io):
    x = cuda.grid(1)
    shape = set_io.shape
    if x < shape[0]:
        netidx = reindex_io[x, 0]
        neuidx = reindex_io[x, 1]
        weiidx = reindex_io[x, 2]
        result = 0
        for i in range(nets_info_io[netidx, 1]):
            result += memory_io[nets_info_io[netidx, 0] + i] * weights_io[weiidx + i]
        result += biases_io[x]
        result = LeakyRelu(result)
        auxmemort_io[neuidx] = result


def CalculateSlow():
    pass
class ANET:
    def __init__(self, many_inputs, many_outputs, many_networks, info = True, activation_function = LeakyRelu):
        self.many_inputs = many_inputs
        self.many_outputs = many_outputs
        self.many_networks = many_networks
        self.activation_function = activation_function
        self.info = info
        self.netid = -1
        self.cuda_many_input = cuda.to_device(self.many_inputs)
        if self.info:
            print("starting Setup:\n|")
            device = str(cuda.get_current_device()).split("'b'")[1].split("''")[0]
            print(f"| cuda run on: {device}")
            print(f"| generate genomes")
        self.networks_genomes = [self._GenerateGenome() for _ in range(self.many_networks)]
        self._BuildPopulation()

    def _CudaPre(self, block, array):
        griddim = tuple(np.array(array.shape) // block + 1)
        blockdim = tuple(np.full_like(griddim, block))
        return griddim, blockdim

    def _GenerateGenome(self):
        neurons_genome = [[self.activation_function, uniform(-1,1), i + self.many_inputs] for i in range(self.many_outputs)]
        synapses_genome = [[[i, ii], uniform(-1,1), True] for i in range(self.many_inputs) for ii in range(self.many_inputs, self.many_outputs + self.many_inputs)]
        self.netid += 1
        return [neurons_genome, synapses_genome, self.netid]
    def _BuildPopulation(self):
        memory_len = sum([(len(i[0]) + self.many_inputs) for i in self.networks_genomes])
        memory = np.zeros(memory_len, dtype=np.float64)
        auxmemory = np.copy(memory)
        weights_len = sum([(len(i[1]) + self.many_inputs + 1) for i in self.networks_genomes])
        weights = np.zeros(weights_len, dtype=np.float64)
        biases_len = sum([len(i[0]) for i in self.networks_genomes])
        biases = np.zeros(biases_len, dtype=np.float64)
        nets_info = np.zeros([self.many_networks, 5], dtype=np.int64)
        movmem = 0
        movwei = 0
        movbia = 0
        for idx, i in enumerate(self.networks_genomes):
            nets_info[idx] = (movmem, len(i[0]) + self.many_inputs, movwei, movbia, i[2])
            movmem += len(i[0]) + self.many_inputs
            movwei += len(i[1]) + self.many_inputs + 1
            movbia += len(i[0])
        biaidx = 0
        for genome, net_info in zip(self.networks_genomes, nets_info):
            for gen, biagen in zip(genome[1], genome[0]):
                if gen[2]:
                    if gen[0][0] < self.many_inputs:
                        target = gen[0][0]
                    else:
                        target = genome[0][gen[0][0] - self.many_inputs - self.many_outputs][2] + self.many_inputs
                    weights[(gen[0][1] - self.many_inputs) * net_info[1] + net_info[2] + target] = gen[1]
                biases[biaidx] = biagen[1]
                biaidx += 1
        reindex = np.empty([sum([(i[1] - self.many_inputs) for i in nets_info]), 3], dtype=np.int64)
        write = 0
        weiidx = 0
        for idx, inf in enumerate(nets_info):
            for i in range(inf[1] - self.many_inputs):
                reindex[write][0] = idx
                reindex[write][1] = inf[0] + i + self.many_inputs
                reindex[write][2] = weiidx
                weiidx += inf[1]
                write += 1
        if self.info:
            print("| move arrays to device")
        self.memory = memory
        self.cuda_auxmemory = cuda.to_device(auxmemory)
        print(self.cuda_auxmemory.copy_to_host())
        self.cuda_weights = cuda.to_device(weights)
        self.cuda_biases = cuda.to_device(biases)
        self.nets_info = nets_info
        self.cuda_nets_info = cuda.to_device(nets_info)
        self.many_neurons = np.sum(nets_info, axis=0)[1]
        self.cuda_many_neurons = cuda.to_device(self.many_neurons)
        self.cuda_reindex = cuda.to_device(reindex)
        # print(nets_info)
        # print(f"{memory_len} | {weights_len} | {biases_len}")
def MathPopulation(self, inputs):
for idx, inp in enumerate(inputs):
self.memory[self.nets_info[idx][0]: self.nets_info[idx][0] + self.many_inputs] = inp
self.cuda_memory = cuda.to_device(self.memory)
setlen = cuda.to_device(np.zeros(self.many_neurons))
Calculate[self._CudaPre(32, np.empty([self.many_neurons]))](setlen, self.cuda_many_neurons, self.cuda_many_input, self.cuda_reindex, self.cuda_memory, self.cuda_weights, self.cuda_biases, self.cuda_nets_info, self.cuda_auxmemory)
# cuda.synchronize()
arr = self.cuda_auxmemory.copy_to_host()
print(arr)
if __name__ == '__main__':
anet = ANET(many_inputs = 3, many_outputs = 2, many_networks = 5)
# pprint(anet._GenerateGenome())
anet.MathPopulation([[1,2,3], [4,5,6], [7,8,9], [10,11,12], [13,14,15]])
error:
Traceback (most recent call last):
File "c:\Users\Ondra\Documents\Pythonporno\ANET\ANET.py", line 144, in <module>
anet.MathPopulation([[1,2,3], [4,5,6], [7,8,9], [10,11,12], [13,14,15]])
File "c:\Users\Ondra\Documents\Pythonporno\ANET\ANET.py", line 138, in MathPopulation
arr = self.cuda_auxmemory.copy_to_host()
File "C:\Users\Ondra\AppData\Local\Programs\Python\Python310\lib\site-packages\numba\cuda\cudadrv\devices.py", line 232, in _require_cuda_context
return fn(*args, **kws)
File "C:\Users\Ondra\AppData\Local\Programs\Python\Python310\lib\site-packages\numba\cuda\cudadrv\devicearray.py", line 277, in copy_to_host
_driver.device_to_host(hostary, self, self.alloc_size,
File "C:\Users\Ondra\AppData\Local\Programs\Python\Python310\lib\site-packages\numba\cuda\cudadrv\driver.py", line 2998, in device_to_host
fn(host_pointer(dst), device_pointer(src), size, *varargs)
File "C:\Users\Ondra\AppData\Local\Programs\Python\Python310\lib\site-packages\numba\cuda\cudadrv\driver.py", line 319, in safe_cuda_api_call
self._check_ctypes_error(fname, retcode)
File "C:\Users\Ondra\AppData\Local\Programs\Python\Python310\lib\site-packages\numba\cuda\cudadrv\driver.py", line 384, in _check_ctypes_error
raise CudaAPIError(retcode, msg)
numba.cuda.cudadrv.driver.CudaAPIError: [700] Call to cuMemcpyDtoH results in UNKNOWN_CUDA_ERROR
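For context, CUDA error 700 reported at cuMemcpyDtoH usually means the kernel itself made an illegal (out-of-bounds) memory access, and the failure only surfaces at the next API call, which is why it moves onto cuda.synchronize() when that call is enabled. One way to localize such an access is the CUDA simulator that the script already toggles through NUMBA_ENABLE_CUDASIM: with the simulator on, kernels run as ordinary Python and a bad index raises a plain IndexError at the offending line. A minimal, self-contained sketch of that debugging approach (illustrative only, not the asker's kernel):

import os
os.environ["NUMBA_ENABLE_CUDASIM"] = "1"  # must be set before numba.cuda is imported

import numpy as np
from numba import cuda

@cuda.jit
def write_one_past_end(out):
    i = cuda.grid(1)
    out[i + 1] = 1.0  # the last thread writes one element past the end of the array

buf = cuda.to_device(np.zeros(4))
write_one_past_end[1, 4](buf)  # under the simulator this raises IndexError with a usable traceback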

Is there a function to add WOE, calculated on Training data, to the whole data set? (python)

I am working on some Python code to predict the default rate of loans handed out by a bank.
I have calculated the WOE and information value (IV) on the training set
(using the following code: https://github.com/Sundar0989/WOE-and-IV/blob/master/WOE_IV.ipynb?fbclid=IwAR1MvEfyGsdyTre0uPJC5WRl91dfue_t0vH5qJezwm2mAg6sjHZJg9MyDYo).
We have also identified 2 high-cardinality variables. We don't know, however, how to add these WOE scores to the whole set. How do we tackle this problem? How can we go further and use WOE to predict the target variable?
code:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy, pylab
Reading the data received from the bank, feature selection part 1, splitting the whole set (Training) into a training set (indices_traintrain), a validation set (indices_val) and a test set (indices_test): a 70/30 split between training+validation and test, then a 70/30 split between training and validation.
Training = pd.read_excel('/Users/enjo/Documents/Master/DM/Data_DSC2019_STUDENTS/DSC2019_Training.xlsx', na_values=np.nan)
Status = Training.iloc[:,-1]
Data = Training.iloc[:,0:45]
Data_missing = Data.isna()
Data_missing = Data_missing.sum()
print(Data_missing/len(Data))
"""
drop variables with more than 80% missing
"""
Drop = ['FREE_CASH_FLOW_AMT',
        'A2_MTHS_FIRST_PCX_COREPROF_CNT', 'A2_MONTHS_IN_BELGIUM_CNT', 'A2_MTHS_SNC_FIRST_COREPROF_CNT', 'MONTHS_SINCE_LAST_REFUSAL_CNT']
DroppedTraining = Training.copy()
for element in Drop:
    DroppedTraining.drop(element, axis=1, inplace=True)
import numpy as np
from sklearn import datasets
from sklearn import svm
from sklearn import preprocessing
Data_preprocessed=[] #contains preprocessed data
from Preprocessing_continuous import Preprocessing_continuous #import function for preprocessing
from Preprocessing_discrete import Preprocessing_discrete #import function for preprocessing
from sklearn.model_selection import train_test_split
indices=np.arange(26962)
indices_train, indices_test = train_test_split(indices, test_size=0.3, random_state=0)
indices_traintrain, indices_val = train_test_split(indices_train, test_size=0.3, random_state=0)
Training['target']= Training['Label_Default'].apply(lambda x:1 if x=='Y' else 0)
Highcardinalityset = []
Highcardinalityset = Training[['Type',
                               'INDUSTRY_CD_3',
                               'INDUSTRY_CD_4',
                               'Managing_Sales_Office_Nbr',
                               'Postal_Code_L',
                               'Product_Desc',
                               'CREDIT_TYPE_CD',
                               'ACCOUNT_PURPOSE_CD',
                               'A2_MARITAL_STATUS_CD',
                               'FINANCIAL_PRODUCT_TYPE_CD',
                               'A2_EMPLOYMENT_STATUS_CD',
                               'A2_RESIDENT_STATUS_CD',
                               'target']]
Highcardinalityset = Highcardinalityset.iloc[indices_traintrain]
Function found on GitHub:
import pandas as pd
import numpy as np
import pandas.core.algorithms as algos
from pandas import Series
import scipy.stats.stats as stats
import re
import traceback
import string
max_bin = 20
force_bin = 3
# define a binning function
def mono_bin(Y, X, n = max_bin):
    df1 = pd.DataFrame({"X": X, "Y": Y})
    justmiss = df1[['X','Y']][df1.X.isnull()]
    notmiss = df1[['X','Y']][df1.X.notnull()]
    r = 0
    while np.abs(r) < 1:
        try:
            d1 = pd.DataFrame({"X": notmiss.X, "Y": notmiss.Y, "Bucket": pd.qcut(notmiss.X, n)})
            d2 = d1.groupby('Bucket', as_index=True)
            r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)
            n = n - 1
        except Exception as e:
            n = n - 1

    if len(d2) == 1:
        n = force_bin
        bins = algos.quantile(notmiss.X, np.linspace(0, 1, n))
        if len(np.unique(bins)) == 2:
            bins = np.insert(bins, 0, 1)
            bins[1] = bins[1] - (bins[1]/2)
        d1 = pd.DataFrame({"X": notmiss.X, "Y": notmiss.Y, "Bucket": pd.cut(notmiss.X, np.unique(bins), include_lowest=True)})
        d2 = d1.groupby('Bucket', as_index=True)

    d3 = pd.DataFrame({}, index=[])
    d3["MIN_VALUE"] = d2.min().X
    d3["MAX_VALUE"] = d2.max().X
    d3["COUNT"] = d2.count().Y
    d3["EVENT"] = d2.sum().Y
    d3["NONEVENT"] = d2.count().Y - d2.sum().Y
    d3 = d3.reset_index(drop=True)

    if len(justmiss.index) > 0:
        d4 = pd.DataFrame({'MIN_VALUE': np.nan}, index=[0])
        d4["MAX_VALUE"] = np.nan
        d4["COUNT"] = justmiss.count().Y
        d4["EVENT"] = justmiss.sum().Y
        d4["NONEVENT"] = justmiss.count().Y - justmiss.sum().Y
        d3 = d3.append(d4, ignore_index=True)

    d3["EVENT_RATE"] = d3.EVENT/d3.COUNT
    d3["NON_EVENT_RATE"] = d3.NONEVENT/d3.COUNT
    d3["DIST_EVENT"] = d3.EVENT/d3.sum().EVENT
    d3["DIST_NON_EVENT"] = d3.NONEVENT/d3.sum().NONEVENT
    d3["WOE"] = np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)
    d3["IV"] = (d3.DIST_EVENT - d3.DIST_NON_EVENT) * np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)
    d3["VAR_NAME"] = "VAR"
    d3 = d3[['VAR_NAME','MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT','DIST_NON_EVENT','WOE', 'IV']]
    d3 = d3.replace([np.inf, -np.inf], 0)
    d3.IV = d3.IV.sum()
    return(d3)
def char_bin(Y, X):
    df1 = pd.DataFrame({"X": X, "Y": Y})
    justmiss = df1[['X','Y']][df1.X.isnull()]
    notmiss = df1[['X','Y']][df1.X.notnull()]
    df2 = notmiss.groupby('X', as_index=True)

    d3 = pd.DataFrame({}, index=[])
    d3["COUNT"] = df2.count().Y
    d3["MIN_VALUE"] = df2.sum().Y.index
    d3["MAX_VALUE"] = d3["MIN_VALUE"]
    d3["EVENT"] = df2.sum().Y
    d3["NONEVENT"] = df2.count().Y - df2.sum().Y

    if len(justmiss.index) > 0:
        d4 = pd.DataFrame({'MIN_VALUE': np.nan}, index=[0])
        d4["MAX_VALUE"] = np.nan
        d4["COUNT"] = justmiss.count().Y
        d4["EVENT"] = justmiss.sum().Y
        d4["NONEVENT"] = justmiss.count().Y - justmiss.sum().Y
        d3 = d3.append(d4, ignore_index=True)

    d3["EVENT_RATE"] = d3.EVENT/d3.COUNT
    d3["NON_EVENT_RATE"] = d3.NONEVENT/d3.COUNT
    d3["DIST_EVENT"] = d3.EVENT/d3.sum().EVENT
    d3["DIST_NON_EVENT"] = d3.NONEVENT/d3.sum().NONEVENT
    d3["WOE"] = np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)
    d3["IV"] = (d3.DIST_EVENT - d3.DIST_NON_EVENT) * np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)
    d3["VAR_NAME"] = "VAR"
    d3 = d3[['VAR_NAME','MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT','DIST_NON_EVENT','WOE', 'IV']]
    d3 = d3.replace([np.inf, -np.inf], 0)
    d3.IV = d3.IV.sum()
    d3 = d3.reset_index(drop=True)
    return(d3)
def data_vars(df1, target):
    stack = traceback.extract_stack()
    filename, lineno, function_name, code = stack[-2]
    vars_name = re.compile(r'\((.*?)\).*$').search(code).groups()[0]
    final = (re.findall(r"[\w']+", vars_name))[-1]

    x = df1.dtypes.index
    count = -1

    for i in x:
        if i.upper() not in (final.upper()):
            if np.issubdtype(df1[i], np.number) and len(Series.unique(df1[i])) > 2:
                conv = mono_bin(target, df1[i])
                conv["VAR_NAME"] = i
                count = count + 1
            else:
                conv = char_bin(target, df1[i])
                conv["VAR_NAME"] = i
                count = count + 1

            if count == 0:
                iv_df = conv
            else:
                iv_df = iv_df.append(conv, ignore_index=True)

    iv = pd.DataFrame({'IV': iv_df.groupby('VAR_NAME').IV.max()})
    iv = iv.reset_index()
    return(iv_df, iv)
final_iv, IV = data_vars(Highcardinalityset,Highcardinalityset.target)
final_iv
IV.sort_values('IV')
IV.to_csv('test.csv')
transform_vars_list = Highcardinalityset.columns.difference(['target'])
transform_prefix = 'new_' # leave this value blank if you need replace the original column values
transform_vars_list
for var in transform_vars_list:
    small_df = final_iv[final_iv['VAR_NAME'] == var]
    transform_dict = dict(zip(small_df.MAX_VALUE.astype(str), small_df.WOE.astype(str)))
    replace_cmd = ''
    replace_cmd1 = ''
    for i in sorted(transform_dict.items()):
        replace_cmd = replace_cmd + str(i[1]) + str(' if x <= ') + str(i[0]) + ' else '
        replace_cmd1 = replace_cmd1 + str(i[1]) + str(' if x == "') + str(i[0]) + '" else '
    replace_cmd = replace_cmd + '0'
    replace_cmd1 = replace_cmd1 + '0'
    if replace_cmd != '0':
        try:
            Highcardinalityset[transform_prefix + var] = Highcardinalityset[var].apply(lambda x: eval(replace_cmd))
        except:
            Highcardinalityset[transform_prefix + var] = Highcardinalityset[var].apply(lambda x: eval(replace_cmd1))
Highcardinalityset['Postal_Code_L'].value_counts()
Highcardinalityset['new_Postal_Code_L'].value_counts()
Highcardinalityset['Managing_Sales_Office_Nbr'].value_counts()
Highcardinalityset['new_Managing_Sales_Office_Nbr'].value_counts()
Nice to see such a high WOE: interesting for that postal code, high risk of default!
Highcardinalityset.to_excel("Highcardinalitysettraintrain.xlsx")
TrainingWOE = DroppedTraining[['Managing_Sales_Office_Nbr', "Postal_Code_L"]]
TrainingWOE["Postal_Code_L_WOE"]=Highcardinalityset[["new_Postal_Code_L"]]
TrainingWOE["Managing_Sales_Office_Nbr_WOE"]=Highcardinalityset[["new_Managing_Sales_Office_Nbr"]]
drop variables that are not relevant because of low IV value
Drop = ["ACCOUNT_PURPOSE_CD", "A2_MARITAL_STATUS_CD", "A2_EMPLOYMENT_STATUS_CD", "A2_RESIDENT_STATUS_CD",
"INDUSTRY_CD_3", "INDUSTRY_CD_4","Type"]
DroppedTrainingAfterIVcalc = DroppedTraining.copy()
for element in Drop:
DroppedTrainingAfterIVcalc.drop(element, axis=1,inplace=True)
Preprocess the remaining variables (44 - 5 (too many missing) - 7 (low IV) + 1 (the target variable added)).
Thanks for asking this question. Here is the code to do the required transformation which is shown in the notebook as well.
transform_vars_list = df.columns.difference(['target'])
transform_prefix = 'new_' # leave this value blank to replace the original column
#apply transformations
for var in transform_vars_list:
    small_df = final_iv[final_iv['VAR_NAME'] == var]
    transform_dict = dict(zip(small_df.MAX_VALUE, small_df.WOE))
    replace_cmd = ''
    replace_cmd1 = ''
    for i in sorted(transform_dict.items()):
        replace_cmd = replace_cmd + str(i[1]) + str(' if x <= ') + str(i[0]) + ' else '
        replace_cmd1 = replace_cmd1 + str(i[1]) + str(' if x == "') + str(i[0]) + '" else '
    replace_cmd = replace_cmd + '0'
    replace_cmd1 = replace_cmd1 + '0'
    if replace_cmd != '0':
        try:
            df[transform_prefix + var] = df[var].apply(lambda x: eval(replace_cmd))
        except:
            df[transform_prefix + var] = df[var].apply(lambda x: eval(replace_cmd1))
In addition, there is a package Xverse which does the same. Please refer to it here - https://github.com/Sundar0989/XuniVerse
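The loop above can be pointed at any DataFrame, so since final_iv was computed on the training slice, one way to score the whole data set is to keep a plain category-to-WOE dictionary per variable and map it onto the full frame. A minimal sketch for the two high-cardinality categorical variables (column names as used above; categories unseen in training fall back to 0, mirroring the 'else 0' in the loop):

woe_maps = {}
for var in ['Postal_Code_L', 'Managing_Sales_Office_Nbr']:
    small_df = final_iv[final_iv['VAR_NAME'] == var]
    woe_maps[var] = dict(zip(small_df.MAX_VALUE, small_df.WOE))

for var, mapping in woe_maps.items():
    # apply the training-derived WOE to every row of the full set
    Training['new_' + var] = Training[var].map(mapping).fillna(0)

The new_* columns are then ordinary numeric features and can be fed to the model in place of the raw high-cardinality categories.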

Bulk-insertion into couchbase via python

I am trying to do some bulk insertion in Couchbase. I tried to search for examples on SO and Google, but I could not get any clue. Here someone mentions that it's not possible:
How to insert a documents in bulk in Couchbase?
but I guess that question was asked 3 years ago. If I understand correctly from the links given below, it is possible to insert documents in bulk:
https://developer.couchbase.com/documentation/server/current/sdk/batching-operations.html
https://pythonhosted.org/couchbase/api/couchbase.html#batch-operation-pipeline
Here is my code, in which I want to implement bulk insertion into Couchbase:
import time
import csv
import datetime
import calendar
from couchbase import Couchbase
from couchbase.bucket import Bucket
from couchbase.exceptions import CouchbaseError

cb = Bucket('couchbase://localhost/bulk-load')
from couchbase.exceptions import CouchbaseTransientError
BYTES_PER_BATCH = 1024 * 256  # 256K

with open('/home/royshah/Desktop/bulk_try/roy.csv') as csvfile:
    lines = csvfile.readlines()[4:]
for k, line in enumerate(lines):
    data_tmp = line.strip().split(',')
    strDate = data_tmp[0].replace("\"", "")
    timerecord = datetime.datetime.strptime(strDate,
                                            '%Y-%m-%d %H:%M:%S.%f')
    microsecs = timerecord.microsecond
    strDate = "\"" + strDate + "\""
    ts = calendar.timegm(timerecord.timetuple()) * 1000000 + microsecs
    datastore = [ts] + data_tmp[1:]
    stre = {'col1 ': datastore[1],  # I am making key-values on the fly from the csv file
            'col2': datastore[2],
            'col3': datastore[3],
            'col4': datastore[4],
            'col5': datastore[5],
            'col6': datastore[6]}
    cb.upsert(str(datastore[0]), (stre))  # datastore[0] is used as the document id and (stre) as the value inserted for that id
cb.upsert(str(datastore[0]), (stre))
does a single insertion, and I want to turn it into a bulk insertion to make it faster. I have no idea how to do bulk insertion in Couchbase. I found this example but I am not sure how to implement it:
https://developer.couchbase.com/documentation/server/current/sdk/batching-operations.html
If someone could point out some examples of bulk loading in Couchbase, or help me figure out how I can do bulk insertion from my code, I would be really grateful. Thanks a lot for any idea or help.
I tried to adapt the example from the docs to your use case. You may have to change one or two details, but you should get the idea.
cb = Bucket('couchbase://localhost/bulk-load')
from couchbase.exceptions import CouchbaseTransientError
BYTES_PER_BATCH = 1024 * 256  # 256K

batches = []
cur_batch = {}
cur_size = 0
batches.append(cur_batch)

with open('/home/royshah/Desktop/bulk_try/roy.csv') as csvfile:
    lines = csvfile.readlines()[4:]
for key, line in enumerate(lines):
    # Format your data
    data_tmp = line.strip().split(',')
    strDate = data_tmp[0].replace("\"", "")
    timerecord = datetime.datetime.strptime(strDate,
                                            '%Y-%m-%d %H:%M:%S.%f')
    microsecs = timerecord.microsecond
    strDate = "\"" + strDate + "\""
    timestamp = calendar.timegm(timerecord.timetuple()) * 1000000 + microsecs

    # Build the key/value pair
    datastore = [timestamp] + data_tmp[1:]
    value = {'col1 ': datastore[1],  # I am making key-values on the fly from the csv file
             'col2': datastore[2],
             'col3': datastore[3],
             'col4': datastore[4],
             'col5': datastore[5],
             'col6': datastore[6]}
    key = str(datastore[0])

    # Add to the current batch, starting a new batch once it grows big enough
    cur_batch[key] = value
    cur_size += len(key) + len(value) + 24
    if cur_size > BYTES_PER_BATCH:
        cur_batch = {}
        batches.append(cur_batch)
        cur_size = 0

print "Have {} batches".format(len(batches))
num_completed = 0
while batches:
    batch = batches[-1]
    try:
        cb.upsert_multi(batch)
        num_completed += len(batch)
        batches.pop()
    except CouchbaseTransientError as e:
        print e
        ok, fail = e.split_results()
        new_batch = {}
        for key in fail:
            new_batch[key] = batch[key]
        batches.pop()
        batches.append(new_batch)
        num_completed += len(ok)
        print "Retrying {}/{} items".format(len(new_batch), len(ok))

Python: invalid syntax: <string>, line 1, pos 16

I have developed some code in Python in which, in order to run the program, I need to take some arguments from the command line. But I keep getting the same error:
Traceback (most recent call last):
File "<string>", line 1, in <fragment>
invalid syntax: <string>, line 1, pos 16
I don't have the faintest idea what is wrong with my code. So, I present my code below in case someone could help me:
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import time
import math
import copy
import QSTK.qstkstudy.EventProfiler as ep
import csv
import sys
import argparse
def readData(li_startDate, li_endDate, ls_symbols):
    #Create datetime objects for Start and End dates (STL)
    dt_start = dt.datetime(li_startDate[0], li_startDate[1], li_startDate[2])
    dt_end = dt.datetime(li_endDate[0], li_endDate[1], li_endDate[2])
    #Initialize daily timestamp: closing prices, so timestamp should be hours=16 (STL)
    dt_timeofday = dt.timedelta(hours=16)
    #Get a list of trading days between the start and end dates (QSTK)
    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
    #Create an object of the QSTK-dataaccess class with Yahoo as the source (QSTK)
    c_dataobj = da.DataAccess('Yahoo', cachestalltime=0)
    #Keys to be read from the data
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    #Read the data and map it to ls_keys via dict() (i.e. Hash Table structure)
    ldf_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    return [d_data, dt_start, dt_end, dt_timeofday, ldt_timestamps]
def marketsim(cash, orders_file, values_file):
    orders = pd.read_csv(orders_file, index_col='Date', parse_dates=True, header=None)
    ls_symbols = list(set(orders['X.4'].values))
    df_lastrow = len(orders) - 1
    dt_start = dt.datetime(orders.get_value(0, 'X.1'), orders.get_value(0, 'X.2'), orders.get_value(0, 'X.3'))
    dt_end = dt.datetime(orders.get_value(df_lastrow, 'X.1'), orders.get_value(df_lastrow, 'X.2'), orders.get_value(df_lastrow, 'X.3') + 1)
    #d_data = readData(dt_start,dt_end,ls_symbols)
    #Initialize daily timestamp: closing prices, so timestamp should be hours=16 (STL)
    dt_timeofday = dt.timedelta(hours=16)
    #Get a list of trading days between the start and end dates (QSTK)
    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
    #Create an object of the QSTK-dataaccess class with Yahoo as the source (QSTK)
    c_dataobj = da.DataAccess('Yahoo', cachestalltime=0)
    #Keys to be read from the data
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    #Read the data and map it to ls_keys via dict() (i.e. Hash Table structure)
    df_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    ls_symbols.append("_CASH")
    trades = pd.Dataframe(index=list(ldt_timestamps[0]), columns=list(ls_symbols))
    current_cash = cash
    trades["_CASH"][ldt_timestamps[0]] = current_cash
    current_stocks = dict()
    for symb in ls_symbols:
        current_stocks[symb] = 0
        trades[symb][ldt_timestamps[0]] = 0
    for row in orders.iterrows():
        row_data = row[1]
        current_date = dt.datetime(row_data['X.1'], row_data['X.2'], row_data['X.3'], 16)
        symb = row_data['X.4']
        stock_value = d_data['close'][symb][current_date]
        stock_amount = row_data['X.6']
        if row_data['X.5'] == "Buy":
            current_cash = current_cash - (stock_value * stock_amount)
            trades["_CASH"][current_date] = current_cash
            current_stocks[symb] = current_stocks[symb] + stock_amount
            trades[symb][current_date] = current_stocks[symb]
        else:
            current_cash = current_cash + (stock_value * stock_amount)
            trades["_CASH"][current_date] = current_cash
            current_stocks[symb] = current_stocks[symb] - stock_amount
            trades[symb][current_date] = current_stocks[symb]
    #trades.fillna(method='ffill',inplace=True)
    #trades.fillna(method='bfill',inplace=False)
    trades.fillna(0)
    #alt_cash = current_cash
    #alt_cash = trades.cumsum()
    value_data = pd.Dataframe(index=list(ldt_timestamps), columns=list("V"))
    value_data = value_data.fillna(0)
    value_data = value_data.cumsum(axis=0)
    for day in ldt_timestamps:
        value = 0
        for sym in ls_symbols:
            if sym == "_CASH":
                value = value + trades[sym][day]
            else:
                value = calue + trades[sym][day] * d_data['close'][sym][day]
        value_data["V"][day] = value
    fileout = open(values_file, "w")
    for row in value_data.iterrows():
        file_out.writelines(str(row[0].strftime('%Y,%m,%d')) + ", " + str(row[1]["V"].round()) + "\n")
    fileout.close()
def main(argv):
    if len(sys.argv) != 3:
        print "Invalid arguments for marketsim.py. It should be of the following syntax: marketsim.py orders_file.csv values_file.csv"
        sys.exit(0)
    #initial_cash = int (sys.argv[1])
    initial_cash = 1000000
    ordersFile = str(sys.argv[1])
    valuesFile = str(sys.argv[2])
    marketsim(initial_cash, ordersFile, valuesFile)

if __name__ == "__main__":
    main(sys.argv[1:])
The input I gave to the command line was:
python marketsim.py orders.csv values.csv
I guess that the problem lies either in the imports or in the main function (including the if below def main(argv)).
I should point out that the files orders.csv and values.csv exist and are located in the same folder.
I hope I have made everything clear.
I am looking forward to reading your answers, community-mates! :D
Thank you!

How to add a member function to an existing Python object?

Previously I created a lot of Python objects of class A, and I would like to add a new method, plotting_in_PC_space_with_coloring_option() (its purpose is to plot some data stored in the object), to class A and call it on those old objects.
An example is:
import copy
import numpy as np
import matplotlib.pyplot as plt  # matplotlib is used by the plotting method at the bottom of the class
from math import *
from pybrain.structure import *
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets.supervised import SupervisedDataSet
import pickle
import neural_network_related


class A(object):
    """the neural network for simulation"""
    '''
    todo:
    - find boundary
    - get_angles_from_coefficients
    '''

    def __init__(self,
                 index,  # the index of the current network
                 list_of_coor_data_files,  # accept multiple files of training data
                 energy_expression_file,  # input, output files
                 preprocessing_settings = None,
                 connection_between_layers = None, connection_with_bias_layers = None,
                 PCs = None,  # principal components
                 ):
        self._index = index
        self._list_of_coor_data_files = list_of_coor_data_files
        self._energy_expression_file = energy_expression_file
        self._data_set = []
        for item in list_of_coor_data_files:
            self._data_set += self.get_many_cossin_from_coordiantes_in_file(item)
        self._preprocessing_settings = preprocessing_settings
        self._connection_between_layers = connection_between_layers
        self._connection_with_bias_layers = connection_with_bias_layers
        self._node_num = [8, 15, 2, 15, 8]
        self._PCs = PCs
    def save_into_file(self, filename = None):
        if filename is None:
            filename = "network_%s.pkl" % str(self._index)  # by default naming with its index
        with open(filename, 'wb') as my_file:
            pickle.dump(self, my_file, pickle.HIGHEST_PROTOCOL)
        return

    def get_cossin_from_a_coordinate(self, a_coordinate):
        num_of_coordinates = len(a_coordinate) / 3
        a_coordinate = np.array(a_coordinate).reshape(num_of_coordinates, 3)
        diff_coordinates = a_coordinate[1:num_of_coordinates, :] - a_coordinate[0:num_of_coordinates - 1,:]  # bond vectors
        diff_coordinates_1 = diff_coordinates[0:num_of_coordinates-2,:]; diff_coordinates_2 = diff_coordinates[1:num_of_coordinates-1,:]
        normal_vectors = np.cross(diff_coordinates_1, diff_coordinates_2);
        normal_vectors_normalized = np.array(map(lambda x: x / sqrt(np.dot(x,x)), normal_vectors))
        normal_vectors_normalized_1 = normal_vectors_normalized[0:num_of_coordinates-3, :]; normal_vectors_normalized_2 = normal_vectors_normalized[1:num_of_coordinates-2,:];
        diff_coordinates_mid = diff_coordinates[1:num_of_coordinates-2];  # these are bond vectors in the middle (remove the first and last one), they should be perpendicular to adjacent normal vectors
        cos_of_angles = range(len(normal_vectors_normalized_1))
        sin_of_angles_vec = range(len(normal_vectors_normalized_1))
        sin_of_angles = range(len(normal_vectors_normalized_1))  # initialization
        for index in range(len(normal_vectors_normalized_1)):
            cos_of_angles[index] = np.dot(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
            sin_of_angles_vec[index] = np.cross(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
            sin_of_angles[index] = sqrt(np.dot(sin_of_angles_vec[index], sin_of_angles_vec[index])) * np.sign(sum(sin_of_angles_vec[index]) * sum(diff_coordinates_mid[index]));
        return cos_of_angles + sin_of_angles

    def get_many_cossin_from_coordinates(self, coordinates):
        return map(self.get_cossin_from_a_coordinate, coordinates)

    def get_many_cossin_from_coordiantes_in_file(self, filename):
        coordinates = np.loadtxt(filename)
        return self.get_many_cossin_from_coordinates(coordinates)
    def mapminmax(self, my_list):  # for preprocessing in network
        my_min = min(my_list)
        my_max = max(my_list)
        mul_factor = 2.0 / (my_max - my_min)
        offset = (my_min + my_max) / 2.0
        result_list = np.array(map(lambda x : (x - offset) * mul_factor, my_list))
        return (result_list, (mul_factor, offset))  # also return the parameters for processing

    def get_mapminmax_preprocess_result_and_coeff(self, data=None):
        if data is None:
            data = self._data_set
        data = np.array(data)
        data = np.transpose(data)
        result = []; params = []
        for item in data:
            temp_result, preprocess_params = self.mapminmax(item)
            result.append(temp_result)
            params.append(preprocess_params)
        return (np.transpose(np.array(result)), params)

    def mapminmax_preprocess_using_coeff(self, input_data=None, preprocessing_settings=None):
        # try begin
        if preprocessing_settings is None:
            preprocessing_settings = self._preprocessing_settings
        temp_setttings = np.transpose(np.array(preprocessing_settings))
        result = []
        for item in input_data:
            item = np.multiply(item - temp_setttings[1], temp_setttings[0])
            result.append(item)
        return result
        # try end
    def get_expression_of_network(self, connection_between_layers=None, connection_with_bias_layers=None):
        if connection_between_layers is None:
            connection_between_layers = self._connection_between_layers
        if connection_with_bias_layers is None:
            connection_with_bias_layers = self._connection_with_bias_layers
        node_num = self._node_num
        expression = ""
        # first part: network
        for i in range(2):
            expression = '\n' + expression
            mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i])
            bias_coef = connection_with_bias_layers[i].params
            for j in range(np.size(mul_coef, 0)):
                temp_expression = 'layer_%d_unit_%d = tanh( ' % (i + 1, j)
                for k in range(np.size(mul_coef, 1)):
                    temp_expression += ' %f * layer_%d_unit_%d +' % (mul_coef[j, k], i, k)
                temp_expression += ' %f);\n' % (bias_coef[j])
                expression = temp_expression + expression  # order of expressions matter in OpenMM
        # second part: definition of inputs
        index_of_backbone_atoms = [2, 5, 7, 9, 15, 17, 19];
        for i in range(len(index_of_backbone_atoms) - 3):
            index_of_coss = i
            index_of_sins = i + 4
            expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
                          (index_of_coss, index_of_coss, self._preprocessing_settings[index_of_coss][1], self._preprocessing_settings[index_of_coss][0])
            expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
                          (index_of_sins, index_of_sins, self._preprocessing_settings[index_of_sins][1], self._preprocessing_settings[index_of_sins][0])
            expression += 'raw_layer_0_unit_%d = cos(dihedral_angle_%d);\n' % (index_of_coss, i)
            expression += 'raw_layer_0_unit_%d = sin(dihedral_angle_%d);\n' % (index_of_sins, i)
            expression += 'dihedral_angle_%d = dihedral(p%d, p%d, p%d, p%d);\n' % \
                          (i, index_of_backbone_atoms[i], index_of_backbone_atoms[i+1], index_of_backbone_atoms[i+2], index_of_backbone_atoms[i+3])
        return expression

    def write_expression_into_file(self, out_file = None):
        if out_file is None: out_file = self._energy_expression_file
        expression = self.get_expression_of_network()
        with open(out_file, 'w') as f_out:
            f_out.write(expression)
        return
    def get_mid_result(self, input_data=None, connection_between_layers=None, connection_with_bias_layers=None):
        if input_data is None: input_data = self._data_set
        if connection_between_layers is None: connection_between_layers = self._connection_between_layers
        if connection_with_bias_layers is None: connection_with_bias_layers = self._connection_with_bias_layers
        node_num = self._node_num
        temp_mid_result = range(4)
        mid_result = []
        # first need to do preprocessing
        for item in self.mapminmax_preprocess_using_coeff(input_data, self._preprocessing_settings):
            for i in range(4):
                mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i])  # fix node_num
                bias_coef = connection_with_bias_layers[i].params
                previous_result = item if i == 0 else temp_mid_result[i - 1]
                temp_mid_result[i] = np.dot(mul_coef, previous_result) + bias_coef
                if i != 3:  # the last output layer is a linear layer, while others are tanh layers
                    temp_mid_result[i] = map(tanh, temp_mid_result[i])
            mid_result.append(copy.deepcopy(temp_mid_result))  # note that should use deepcopy
        return mid_result

    def get_PC_and_save_it_to_network(self):
        '''get PCs and save the result into _PCs
        '''
        mid_result = self.get_mid_result()
        self._PCs = [item[1] for item in mid_result]
        return
    def train(self):
        ####################### set up autoencoder begin #######################
        node_num = self._node_num
        in_layer = LinearLayer(node_num[0], "IL")
        hidden_layers = [TanhLayer(node_num[1], "HL1"), TanhLayer(node_num[2], "HL2"), TanhLayer(node_num[3], "HL3")]
        bias_layers = [BiasUnit("B1"), BiasUnit("B2"), BiasUnit("B3"), BiasUnit("B4")]
        out_layer = LinearLayer(node_num[4], "OL")
        layer_list = [in_layer] + hidden_layers + [out_layer]
        molecule_net = FeedForwardNetwork()
        molecule_net.addInputModule(in_layer)
        for item in (hidden_layers + bias_layers):
            molecule_net.addModule(item)
        molecule_net.addOutputModule(out_layer)
        connection_between_layers = range(4); connection_with_bias_layers = range(4)
        for i in range(4):
            connection_between_layers[i] = FullConnection(layer_list[i], layer_list[i+1])
            connection_with_bias_layers[i] = FullConnection(bias_layers[i], layer_list[i+1])
            molecule_net.addConnection(connection_between_layers[i])  # connect two neighbor layers
            molecule_net.addConnection(connection_with_bias_layers[i])
        molecule_net.sortModules()  # this is some internal initialization process to make this module usable
        ####################### set up autoencoder end #######################
        trainer = BackpropTrainer(molecule_net, learningrate=0.002, momentum=0.4, verbose=False, weightdecay=0.1, lrdecay=1)
        data_set = SupervisedDataSet(node_num[0], node_num[4])
        sincos = self._data_set
        (sincos_after_process, self._preprocessing_settings) = self.get_mapminmax_preprocess_result_and_coeff(data = sincos)
        for item in sincos_after_process:  # is it needed?
            data_set.addSample(item, item)
        trainer.trainUntilConvergence(data_set, maxEpochs=50)
        self._connection_between_layers = connection_between_layers
        self._connection_with_bias_layers = connection_with_bias_layers
        print("Done!\n")
        return
    def create_sge_files_for_simulation(self, potential_centers = None):
        if potential_centers is None:
            potential_centers = self.get_boundary_points()
        neural_network_related.create_sge_files(potential_centers)
        return

    def get_boundary_points(self, list_of_points = None, num_of_bins = 5):
        if list_of_points is None: list_of_points = self._PCs
        x = [item[0] for item in list_of_points]
        y = [item[1] for item in list_of_points]
        temp = np.histogram2d(x, y, bins=[num_of_bins, num_of_bins])
        hist_matrix = temp[0]
        # add a set of zeros around this region
        hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins), 0)
        hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins), 0)
        hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins + 2), 1)
        hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins + 2), 1)
        hist_matrix = (hist_matrix != 0).astype(int)
        sum_of_neighbors = np.zeros(np.shape(hist_matrix))  # number of neighbors occupied with some points
        for i in range(np.shape(hist_matrix)[0]):
            for j in range(np.shape(hist_matrix)[1]):
                if i != 0: sum_of_neighbors[i,j] += hist_matrix[i - 1][j]
                if j != 0: sum_of_neighbors[i,j] += hist_matrix[i][j - 1]
                if i != np.shape(hist_matrix)[0] - 1: sum_of_neighbors[i,j] += hist_matrix[i + 1][j]
                if j != np.shape(hist_matrix)[1] - 1: sum_of_neighbors[i,j] += hist_matrix[i][j + 1]
        bin_width_0 = temp[1][1] - temp[1][0]
        bin_width_1 = temp[2][1] - temp[2][0]
        min_coor_in_PC_space_0 = temp[1][0] - 0.5 * bin_width_0  # multiply by 0.5 since we want the center of the grid
        min_coor_in_PC_space_1 = temp[2][0] - 0.5 * bin_width_1
        potential_centers = []
        for i in range(np.shape(hist_matrix)[0]):
            for j in range(np.shape(hist_matrix)[1]):
                if hist_matrix[i,j] == 0 and sum_of_neighbors[i,j] != 0:  # no points in this block but there are points in neighboring blocks
                    temp_potential_center = [round(min_coor_in_PC_space_0 + i * bin_width_0, 2), round(min_coor_in_PC_space_1 + j * bin_width_1, 2)]
                    potential_centers.append(temp_potential_center)
        return potential_centers
    # this function is added after those old objects of A were created
    def plotting_in_PC_space_with_coloring_option(self,
                                                  list_of_coordinate_files_for_plotting=None,  # accept multiple files
                                                  color_option='pure'):
        '''
        by default, we are using training data, and we also allow external data input
        '''
        if list_of_coordinate_files_for_plotting is None:
            PCs_to_plot = self._PCs
        else:
            temp_sincos = []
            for item in list_of_coordinate_files_for_plotting:
                temp_sincos += self.get_many_cossin_from_coordiantes_in_file(item)
            temp_mid_result = self.get_mid_result(input_data = temp_sincos)
            PCs_to_plot = [item[1] for item in temp_mid_result]
        (x, y) = ([item[0] for item in PCs_to_plot], [item[1] for item in PCs_to_plot])
        # coloring
        if color_option == 'pure':
            coloring = 'red'
        elif color_option == 'step':
            coloring = range(len(x))
        fig, ax = plt.subplots()
        ax.scatter(x, y, c=coloring)
        ax.set_xlabel("PC1")
        ax.set_ylabel("PC2")
        plt.show()
        return
But it seems that plotting_in_PC_space_with_coloring_option() was not bound to those old objects. Is there any way to fix this (I do not want to recreate these objects, since creation involves CPU-intensive calculation and would take a very long time)?
Thanks!
Something like this:
class A:
    def q(self): print 1

a = A()

def f(self): print 2

setattr(A, 'f', f)
a.f()
This is called a monkey patch.
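Because attribute lookup on an instance falls back to its class, patching A this way also makes the new method callable on objects that already exist, including ones reloaded from the pickle files written by save_into_file(). A minimal sketch of that idea (Python 2 syntax to match the code above; plot_new and the pickle filename are only illustrative):

import pickle

def plot_new(self):
    print self._node_num  # the old object's attributes are all available here

setattr(A, 'plot_new', plot_new)

with open('network_1.pkl', 'rb') as my_file:  # a file previously written by save_into_file()
    old_obj = pickle.load(my_file)
old_obj.plot_new()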
