Appending non-DataFrame to pandas csv - python

I'm trying to find out if there is an easier way to append a Date column and an additional info column to my existing csv file. I'm adding these columns because this information is not in the JSON string from the REST API call.
# Original approach from the question: append the normalized API data to
# hss.csv, then re-read the whole file twice to tack on a date column and
# a storage-system column.  This is the version that produces the stray
# "Unnamed: 0" columns, because every to_csv call writes the DataFrame
# index by default, and the rewrite clobbers previously stored dates.
import requests
import json
import http.client
import datetime
import pandas as pd
from pandas.io.json import json_normalize

# NOTE(review): `api` and `headers` are defined elsewhere in the asker's
# environment -- they are not shown in this snippet.
url = api.getinfo()
r = requests.get(url, headers=headers, verify=False)
if r.status_code != http.client.OK:
    raise requests.HTTPError(r)

# Round-trip the payload through a JSON string before flattening it.
jsonstring = json.dumps(r.json()["data"])
load = json.loads(jsonstring)
df = json_normalize(load)

col = ["poolId", "totalPoolCapacity", "totalLocatedCapacity",
       "availableVolumeCapacity", "usedCapacityRate"]
with open('hss.csv', 'a') as f:
    df.to_csv(f, header=False, columns=col)

# Re-read the file to add the run date, then re-read again to add the
# system name; each to_csv here also writes the index as a new column.
a = pd.read_csv('hss.csv')
a['date'] = [datetime.date.today()] * len(a)
a.to_csv('hss.csv')

b = pd.read_csv('hss.csv')
b['storage system'] = "ssystem22"
b.to_csv('hss.csv')
I end up getting extra columns Unnamed: 0,Unnamed: 0.1 in my csv file each time the script is run. Also each time I append it overwrites the old dates.
,Unnamed: 0,Unnamed: 0.1,poolId,totalPoolCapacity, totalLocatedCapacity,availableVolumeCapacity,usedCapacityRate,date,storage system
0,155472,223618,565064,51,,2017-04-12,ssystem22
1,943174,819098,262042,58,,2017-04-12,ssystem22
0,764600,966017,046668,71,,2017-04-12,ssystem22
1,764600,335680,487650,76,,2017-04-12,ssystem22
2,373700,459800,304446,67,,2017-04-12,ssystem22

I kept researching and found how to fix this. I should have been using the pd.Series function. Below is the corrected code:
# Corrected approach: add the two columns that are missing from the API
# payload to the DataFrame *before* writing, so the CSV never has to be
# re-read and rewritten (which is what produced the Unnamed index columns
# and clobbered old dates).
import requests
import json
import http.client
import datetime
import pandas as pd
from pandas.io.json import json_normalize

# Fix: `csvfile` was used in the open() call below but never defined.
csvfile = 'hss.csv'

# NOTE(review): `api` and `headers` come from elsewhere in the asker's
# environment -- not shown in this snippet.
url = api.getinfo()
r = requests.get(url, headers=headers, verify=False)
if r.status_code != http.client.OK:
    raise requests.HTTPError(r)

# r.json() already returns parsed objects; the original
# json.dumps()/json.loads() round-trip was a no-op and is removed.
df = json_normalize(r.json()["data"])

df['storage system'] = pd.Series('ssystem22', index=df.index)
df['date'] = pd.Series(datetime.date.today().strftime('%m-%d-%Y'),
                       index=df.index)

col = ["poolId", "totalPoolCapacity", "totalLocatedCapacity",
       "availableVolumeCapacity", "usedCapacityRate", "storage system",
       "date"]
with open(csvfile, 'a') as f:
    # index=False keeps the DataFrame's row index out of the file --
    # the source of the "Unnamed: 0" columns in the first version.
    df.to_csv(f, header=False, columns=col, index=False)

Related

Add sufix on duplicates in pandas dataframe Python

I am writing a script to download images.
I'm reading an Excel file as a pandas dataframe
Column A -url links
Column B - Name
downloaded images will have this name, example "A.jpeg"
There will be duplicates in Column B [Name]; in that case I would like to add a suffix to the image name.
so the output will be
A.jpeg
A-1.Jpeg
..
# Download one image per spreadsheet row, naming each file after the
# 'name' column.  (Duplicate names overwrite each other here -- that is
# the limitation the question is asking about.)
import requests
import pandas as pd

df = pd.read_excel(r'C:\Users\exdata1.xlsx')

for index, row in df.iterrows():
    url = row['url']
    r = requests.get(url)
    # Fix: removed the dead `file_name = url.split('/')` assignment that
    # was immediately overwritten by the line below.
    file_name = row['name'] + ".jpeg"
    if r.status_code == 200:
        with open(file_name, "wb") as f:
            f.write(r.content)
    print(file_name)
I have been trying cumcount but can't really seem to get it to work..
I appreciate all the help I can get.
You can try:
# Download each image, disambiguating duplicate names with a numeric
# suffix: the first "A" becomes A.jpeg, later ones A-1.jpeg, A-2.jpeg, ...
import requests
import pandas as pd

df = pd.read_excel(r"C:\Users\exdata1.xlsx")

seen = {}  # base name -> how many times it has appeared so far
for _, row in df.iterrows():
    base = row["name"]
    occurrences = seen.get(base, 0)
    seen[base] = occurrences + 1
    # First occurrence keeps the plain name; repeats get -1, -2, ...
    if occurrences == 0:
        name = f"{base}.jpeg"
    else:
        name = f"{base}-{occurrences}.jpeg"
    r = requests.get(row["url"])
    if r.status_code == 200:
        with open(name, "wb") as f:
            f.write(r.content)
        print(name)
This will download the files as A.jpeg, A-1.jpeg, A-2.jpeg, ...

pandas dataframe doesn't add all elements

I'm trying this code on a folder with bunch of 'txt' files
# Question's version: read each review file (title line, url line, then
# the review body) and collect the records into a DataFrame.
import pandas as pd
import os

for review in os.listdir('ebert_reviews'):
    with open(os.path.join('ebert_reviews', review), encoding='utf-8') as file:
        # BUG preserved from the question: the list is re-created on every
        # iteration, so only one file's record survives the loop.
        list_dir = []
        title = file.readline()[:-1]   # [:-1] drops the trailing newline
        url = file.readline()[:-1]
        review_text = file.read()
        list_dir.append({'title': title,
                         'url': url,
                         'review_text': review_text})
        #print(list_dir)

df = pd.DataFrame(list_dir)
print(df)
The dataframe only saves one of the rows, and it's not even the last one. How can I add every entry to it?
This should work
# Fixed version: the accumulator lives *outside* the loop, so every
# review file contributes one record to the final DataFrame.
import pandas as pd
import os

records = []
for review in os.listdir('ebert_reviews'):
    with open(os.path.join('ebert_reviews', review), encoding='utf-8') as file:
        title = file.readline()[:-1]   # [:-1] drops the trailing newline
        url = file.readline()[:-1]
        review_text = file.read()
    records.append({'title': title,
                    'url': url,
                    'review_text': review_text})

df = pd.DataFrame(records)
print(df)

API request loop through pagination in python

I am looking to loop through the CoinGecko API for all of the exchanges listed on there, pull the tickers that are listed for each exchange (this is paginated to 100 rows), and loop through all of the pages — there is no way of telling how many there are. Then store all rows out to a CSV.
Here is what I have come up with so far.
from pycoingecko import CoinGeckoAPI
import pandas as pd

cg = CoinGeckoAPI()

# Grab a list of all the exchanges listed on CoinGecko.
ex_list = cg.get_exchanges_list()
df = pd.json_normalize(ex_list)
df.to_csv('exchange_list.csv', encoding='utf-8', index=False)

id_list = df['id'].tolist()


def get_ex_tickers():
    """Download every ticker page for each exchange and write one CSV each.

    The tickers endpoint is paginated (100 rows per page) and the total
    page count is not reported, so pages are requested in order until one
    comes back empty.  (Fixes the original, which fetched only the
    hard-coded page 2.)
    """
    for x in id_list:
        frames = []
        page = 1
        while True:
            # NOTE(review): parameter name `page` per the CoinGecko
            # /exchanges/{id}/tickers API -- confirm against the
            # pycoingecko version in use.
            d = cg.get_exchanges_tickers_by_id(x, page=page)
            tickers = d.get('tickers')
            if not tickers:
                break  # an empty page means we have read everything
            frames.append(pd.json_normalize(d, record_path=['tickers'],
                                            meta=['name']))
            page += 1
        if frames:
            all_tickers = pd.concat(frames, ignore_index=True)
            filename = 'ticker_lists/' + x + '_ticker_list' + '.csv'
            all_tickers.to_csv(filename, encoding='utf-8', index=False)


get_ex_tickers()

I can't save from dataframe to postgresql

# Fetch TfL arrival predictions for a set of vehicles and load them into
# a PostgreSQL table.
import pandas as pd
import requests as rq
from sqlalchemy import create_engine

# NOTE(review): the '#' in this DSN looks like a mangled '@' -- the
# SQLAlchemy form is postgresql+psycopg2://user:password@host/dbname.
# Confirm before running.
engine = create_engine('postgresql+psycopg2://postgres:3434#127.0.0.1/postgres')

df = pd.DataFrame()
vehicleList = {"LX59ANR", "SN63NBK", "YY64GRU"}
for ids in vehicleList:
    r = rq.get('https://api.tfl.gov.uk/Vehicle/' + ids + '/Arrivals')
    temp = pd.read_json(r.text)
    temp['Type'] = ids  # tag each row with the vehicle it came from
    df = pd.concat([df, temp], sort=False).reset_index(drop=True)

# Fix: df.head(0) strips every row, so only the column headers reached
# the database.  Write the full frame instead.
df.to_sql('tfl_bus', engine, if_exists='replace', index=False)
Hello. I cannot save data from a pandas DataFrame to PostgreSQL; only the column names are written.
When I removed head(0), the result looked like this:
This work , I added this line : df['timing'] = list(map(lambda x: json.dumps(x), df['timing']))
# Working version: fetch one vehicle's arrivals and write them to
# PostgreSQL, serialising the nested 'timing' column first.
import sqlalchemy as sa
import psycopg2
import requests as rq
import pandas as pd
import json

response = rq.get('https://api.tfl.gov.uk/Vehicle/SN63NBK/Arrivals')
temp = pd.read_json(response.text)
temp['Type'] = '1'
df = pd.concat([pd.DataFrame(), temp], sort=False).reset_index(drop=True)

engine = sa.create_engine('postgresql+psycopg2://postgres:3434#127.0.0.1/postgres')

# 'timing' holds nested objects that the DB driver cannot store directly;
# serialise each entry to a JSON string before writing.
df['timing'] = [json.dumps(entry) for entry in df['timing']]
df.to_sql('tfl_bus2', engine, if_exists='replace', index=False)
df.head(0) needs to be replaced with just df.
The head(0) strips away the actual data leaving the columns...

Passing dataframe column value as parameter in get url python

I am trying to pass a column value as url parameter in loop and I am also trying to put result into json file and later convert into excel. I need to do this without creating any function.
# Question's version: call the API once per value in the 'name' column
# and save the responses to a JSON file.
import pandas as pd
import requests
import json
from pandas.io.json import json_normalize

df = pd.read_excel('C:/Users/one.xlsx', converters={'name': str})
df = df['name']
df.head()
dd = df.values

user = b"user"
passwd = b'pass'
auth_values = (user, passwd)

for i in dd:
    ur = 'http://xyz.co&name='
    # NOTE(review): str(dd) stringifies the whole array instead of the
    # loop variable i -- this is the bug the question is about.
    url = ur + str(dd)
    response = (requests.get(url, auth=auth_values).text)
    response_json = json.loads(response)
    response_json = json.dumps(response, ensure_ascii=True)
    # Re-opened in 'w' mode every pass, so only the last response survives.
    writeFile = open('C:/Users/file1.json', 'w')
    writeFile.write(response_json)
    writeFile.close()
    print(url)
You can try this:
# Call the API once per value in the 'name' column and append every
# response to a single JSON file.
import pandas as pd
import requests
import json
from pandas.io.json import json_normalize

df = pd.read_excel('C:/Users/one.xlsx', converters={'name': str})
df = df['name']
df.head()
dd = df.values

user = b"user"
passwd = b'pass'
auth_values = (user, passwd)

# Loop-invariant prefix hoisted out of the loop.
base_url = 'http://xyz.co&name='

# Open the file once so each iteration appends rather than overwrites.
with open('C:/Users/file1.json', 'w') as writeFile:
    for i in dd:
        url = base_url + str(i)
        response = requests.get(url, auth=auth_values).text
        # Fix: the parsed object was discarded and json.dumps was applied
        # to the raw response *string*, writing double-encoded JSON.
        # Parse once, then dump the parsed object.
        response_json = json.loads(response)
        writeFile.write(json.dumps(response_json, ensure_ascii=True))
        print(url)
For export to excel:
# Variant that accumulates every API response into one DataFrame and
# exports the combined result to Excel.
df = pd.read_excel('C:/Users/one.xlsx', converters={'name': str})
df = df['name']
dd = df.values

user = b"user"
passwd = b'pass'
auth_values = (user, passwd)

# Fix: the original did `df_base.append(df)` and discarded the return
# value (DataFrame.append is not in-place), so every response after the
# first was lost.  Collect the frames and concatenate once at the end.
frames = []
for i in dd:
    url = 'http://xyz.co&name=' + str(i)
    response = requests.get(url, auth=auth_values).text
    frames.append(pd.read_json(response))
    print(url)

if frames:
    df_base = pd.concat(frames, ignore_index=True)
    df_base.to_excel("C:/Users/output.xlsx")

Categories