Loop only saves last line to csv - python

I'm trying to save a JSON page to a CSV file, but it only stores the last row (although every row shows up in the print statements).
Is there anything wrong with my code?
url = 'http://mapit.mysociety.org/areas/WMC'
response = urllib2.urlopen(url)
html = response.read()
data = json.loads(html)

with open('mycsvfile.csv', 'w') as f:  # Just use 'w' mode in 3.x
    w = csv.DictWriter(f, ['id', 'gss', 'name', 'country_name'])
    w.writeheader()
    for entry in data.values():
        record = {
            "id": entry["id"],
            "gss": entry["codes"]["gss"],
            "name": entry["name"],
            "country_name": entry["country_name"],
        }
        print record
        w.writerow(record)
Thanks in advance.
Kind regards
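Note: a Python 3 sketch of the same snippet is shown below (urllib2 became urllib.request in Python 3); the key point is that w.writerow(record) must stay indented inside the for loop, otherwise only the last record reaches the file:

import csv
import json
from urllib.request import urlopen  # urllib2 was split up in Python 3

url = 'http://mapit.mysociety.org/areas/WMC'
with urlopen(url) as response:
    data = json.loads(response.read())

with open('mycsvfile.csv', 'w', newline='') as f:
    w = csv.DictWriter(f, ['id', 'gss', 'name', 'country_name'])
    w.writeheader()
    for entry in data.values():
        record = {
            "id": entry["id"],
            "gss": entry["codes"]["gss"],
            "name": entry["name"],
            "country_name": entry["country_name"],
        }
        print(record)
        w.writerow(record)  # kept inside the loop so every row is written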

Related

find value in JSON from list in text file

What I am trying to accomplish:
Use the contents of a text file to search for values in a JSON file.
My text file contains one entry per line. These will match values in the JSON file:
thing1
thing2
thing3
I would like to iterate over this list and return matches from the JSON file:
{
    "ssb": [
        {
            "uid": 27,
            "appid": "thing1",
            "title": "Title of thing",
            "created_time": "2009-11-17T01:32:28+00:00",
            "published_time": "2009-11-17T01:32:28+00:00",
            "updated_time": "2022-11-14T17:26:23+00:00"
        }
    ]
}
import json

upgrade_apps = open("apps_to_upgrade.txt", "r")
ua = upgrade_apps.readlines()
upgrade_apps.close()

ua3 = "thing1"

# Testing results
print(ua)
print(type(ua))
for atu in ua:
    print(atu)
## ^ this returns the expected text from the file

with open('dump.json') as f:
    data = json.load(f)
    f.close()

jsonResult = data['ssb']
for i in jsonResult:
    if i['appid'] == ua3:  # <THIS IS WHERE I AM STUCK> If I use ua3 OR "thing1" I get the expected result
        print(i['uid'], i['appid'])
        break
I have also tried adding an outer for loop around the code above, but I get back only the last entry from the text file.
# all of the above code block plus
for atu in ua:
    with open('dump.json') as f:
        data = json.load(f)
        f.close()
    jsonResult = data['ssb']
    for i in jsonResult:
        if i['appid'] == atu:
            print(i['uid'], i['appid'])
            break
Remove the newlines from the lines in ua, and turn it into a set. Then you can test if i['appid'] is in the set.
import json

with open("apps_to_upgrade.txt", "r") as upgrade_apps:
    ua = set(line.strip() for line in upgrade_apps)

with open('dump.json') as f:
    data = json.load(f)

jsonResult = data['ssb']
for i in jsonResult:
    if i['appid'] in ua:
        print(i['uid'], i['appid'])
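Using a set gives O(1) membership tests, and line.strip() removes the trailing newline that made the original == comparison fail, so every matching entry is found in a single scan of the JSON list.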

How to add lines from a text file to a single append request on the Google Sheets API without destroying my quota

I'm not an expert programmer. I extract some data to a text file, and now I want to import it into a Google spreadsheet. This works, but I hit the quota limit because I send one request per line. How can I add all the lines to a single request?
insert_data_option = 'OVERWRITE'

with open(r"juntos.txt", "r", encoding="utf8") as inputfile:
    for line in inputfile:
        string = line.rstrip(" \n")
        value_range_body = {
            "majorDimension": "ROWS",
            "range": "A:B",
            "values": [
                [
                    string
                ]
            ]
        }
        request = service.spreadsheets().values().append(spreadsheetId=spreadsheet_id, range=range_, valueInputOption=value_input_option, insertDataOption=insert_data_option, body=value_range_body)
        response = request.execute()
I believe your goal is as follows: you want to append the values to the Spreadsheet using one API call of the Sheets API.
In your situation, how about the following modification?
Modified script:
insert_data_option = 'OVERWRITE'
data = []
with open("juntos.txt", "r", encoding="utf8") as inputfile:
    for line in inputfile:
        string = line.rstrip(" \n")
        data.append([string])

value_input_option = "USER_ENTERED"
value_range_body = {"values": data}
request = service.spreadsheets().values().append(spreadsheetId=spreadsheet_id, range="A:B", valueInputOption=value_input_option, insertDataOption=insert_data_option, body=value_range_body)
response = request.execute()
In this case, the values are appended to the 1st tab of the Spreadsheet by one API call.
Note:
In your situation, the following script could also be used to retrieve the data.
data = []
with open("juntos.txt", "r", encoding="utf8") as inputfile:
    data = inputfile.read().split("\n")
data = [[e.rstrip(" \n")] for e in data]
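If the file ever grows very large, the same single-call idea can be applied per chunk to stay under the per-request payload limit. A minimal sketch, where the chunk size of 10000 is an arbitrary assumption rather than a documented limit:

CHUNK = 10000  # arbitrary batch size, not an official limit
for start in range(0, len(data), CHUNK):
    body = {"values": data[start:start + CHUNK]}
    service.spreadsheets().values().append(
        spreadsheetId=spreadsheet_id,
        range="A:B",
        valueInputOption="USER_ENTERED",
        insertDataOption=insert_data_option,
        body=body,
    ).execute()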
Reference:
Method: spreadsheets.values.append

Calling a function inside a loop is not working in Python

I have a function/method which will create a JIRA ticket. I want to call that function inside a loop so that I can pass a different description into each JIRA ticket. I have a CSV file with different service failures, so the idea is to create a JIRA ticket for each row of the CSV file.
My JIRA Method
def jira_rest_call(description):
    # Build the text for the JIRA ticket.
    jira_summary = "Pro active Monitoring"
    jira_assignee = 'USERID'
    jira_description = description
    priority = 'High'
    labels_list = 'Production Failure'
    # Build the JSON to post to JIRA
    json_data = '''
    {
        "fields":{
            "project":{
                "id": "25102",
                "key": "ABC"
            },
            "assignee":{"name":"%s"},
            "summary": "%s",
            "issuetype":{
                "name":"Story"
            },
            "description": "%s",
            "priority":{"name":"%s"},
            "labels":["%s"]
        }
    }''' % (jira_assignee, jira_summary, jira_description, priority, labels_list)
    # Set the root JIRA URL, and encode the username and password
    url = 'https://jira-abb.net/rest/api/2/issue'
    userpass = 'Z683050' + ':' + '*******'
    encoded_u = base64.b64encode(userpass.encode()).decode()
    headers = {"Authorization": "Basic %s" % encoded_u}
    headers = {'Content-Type': 'application/json'}  # note: this reassignment replaces the Authorization header
    # Build the request
    r = requests.post(url, auth=HTTPBasicAuth('Z683050', '*******'), headers=headers, data=json_data)
    # Send the request and grab JSON response
    # response = urlopen(restreq, data)
    # Load into a JSON object and return that to the calling function
    return r
I am calling this method from a different Python module like this:
def jira_creation():
    with open('test_duplicates_1.csv', 'r') as csv_file:
        for i in csv_file:
            print([i])
            jira_rest_call([i])
My CSV data looks like this:
PDFDownloader,Backend failed,100
ImageProcess,NullPointer,200
So the jira_creation() method invoked jira_rest_call() and created a ticket only for the first line, but I am expecting two tickets.
What is wrong with this code?
with open('test_duplicates_1.csv', 'r') as csv_file:
    for i in csv_file:
        print([i])
        jira_rest_call([i])
I even tested the print statement (print([i])); it prints twice, but the method call (jira_rest_call([i])) happened only once.
You are not reading the CSV file the right way. Try this instead:

import csv

with open('test_duplicates_1.csv', 'r') as csv_file:
    reader = csv.DictReader(csv_file)
    for row in reader:
        print(row)
        jira_rest_call(row['<column_name>'])  # I don't know what the column names are in your CSV file. Replace with the right one.
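Note that the sample data shown above has no header row, so csv.DictReader would consume the first data line as the header. If that is the case, a sketch with plain csv.reader might be closer; the column meanings here are assumed from the sample data:

import csv

with open('test_duplicates_1.csv', 'r') as csv_file:
    reader = csv.reader(csv_file)
    for service, failure, code in reader:  # column meanings are an assumption
        jira_rest_call("%s: %s (code %s)" % (service, failure, code))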

Missing data in Python

I'm trying to import a JSON file into Python to do some data analysis. Each JSON object has a lot of different variables in it (about 7-10). Some objects have certain variables, while other objects don't. I am specifically interested in five variables from each JSON line. However, some objects have missing data. How do I make the program put a None in place of every missing value?
import json
import numpy as np

data = []
keys = ["hostid", "time", "userid", "link", "title"]
m = len(keys)

with open('test.json') as json_data:
    for line in json_data:
        dataline = json.loads(line)
        row = []
        for i in xrange(m):
            row.append(dataline[keys[i]])
        data.append(row)
    json_data.close()

data = np.array(data)
Here are some sample JSON objects. As you can see, the first object has all five variables that I want, but the second object does not have data for the "title" variable.
{
    "title": "Monster Man",
    "link": "http://monsters4ever.com/tagged/rosemary%27s%20baby%20(1968)",
    "userid": 130290,
    "field5": "lezmer Brunch at City Winery? Who Knew? -- Grub Street Chicago\"",
    "hostid": "3969937ab0a3e2db8690c482564006a7",
    "time": 376541
}
{
    "link": "http://www.sfgate.com/world/article/WORLD-News-of-the-Day-From-Across-the-Globe-4120318.php",
    "userid": 227954,
    "field5": "ries « SHEfinds\"",
    "hostid": "6096407936827c96fa0833f26ab33b76",
    "time": 376541
}
Can someone please help me?
Rather than filling in the missing data, when you try to retrieve the data from the object, instead of the usual: x['field'], try x.get('field').
e.g.:
with open('test.json') as json_data:
    for line in json_data:
        dataline = json.loads(line)
        row = []
        for key in keys:
            row.append(dataline.get(key))
        # better is:
        # row = [dataline.get(key) for key in keys]
        data.append(row)
This works because dict.get returns None if the key isn't found in the dictionary.
If you really don't want to do that, and you know the fields that you want, you can use dict.setdefault to put None in there:
for field in fields_you_care_about:
    obj.setdefault(field, None)
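dict.setdefault only inserts the default when the key is absent, so fields that are already present keep their original values.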
I'd use try. I'd also just iterate through the keys list.
with open('test.json') as json_data:
    for line in json_data:
        dataline = json.loads(line)
        row = []
        for i in keys:  # iterate through keys
            try:
                row.append(dataline[i])
            except:
                pass
        data.append(row)
    json_data.close()
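Be aware that pass silently skips missing keys, so rows can end up with fewer than len(keys) entries; appending None in the except branch, as the next answer does, keeps the columns aligned.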
You could use try, since accessing a key that doesn't exist raises an exception:
import json
import numpy as np

data = []
keys = ["hostid", "time", "userid", "link", "title"]
m = len(keys)

with open('test.json') as json_data:
    for line in json_data:
        dataline = json.loads(line)
        row = []
        for i in xrange(m):
            try:
                row.append(dataline[keys[i]])
            except Exception:
                row.append(None)
        data.append(row)
    json_data.close()

data = np.array(data)

Python read JSON file and modify

Hi, I am trying to take the data from a JSON file, insert an id, and then perform a REST POST.
My file data.json has:
{
    'name':'myname'
}
and I would like to add an id so that the JSON data looks like:
{
    'id': 134,
    'name': 'myname'
}
So I tried:
import json

f = open("data.json", "r")
data = f.read()
jsonObj = json.loads(data)
I can't get the JSON file to load.
What should I do to convert the JSON file into a JSON object and add another id value?
Set item using data['id'] = ....
import json

with open('data.json', 'r+') as f:
    data = json.load(f)
    data['id'] = 134    # <--- add `id` value.
    f.seek(0)           # <--- should reset file position to the beginning.
    json.dump(data, f, indent=4)
    f.truncate()        # remove remaining part
falsetru's solution is nice, but has a little bug:
Suppose the original 'id' was longer than 5 characters. When we then dump with the new 'id' (134, only 3 characters), the content written from position 0 in the file is shorter than the original content, and extra characters (such as '}') from the original content are left behind in the file (this is what the f.truncate() call above guards against).
I solved that by replacing the original file.
import json
import os

filename = 'data.json'
with open(filename, 'r') as f:
    data = json.load(f)
    data['id'] = 134  # <--- add `id` value.

os.remove(filename)
with open(filename, 'w') as f:
    json.dump(data, f, indent=4)
I would like to present a modified version of Vadim's solution. It helps when dealing with asynchronous requests that write to or modify the JSON file. I know it wasn't part of the original question, but it might be helpful for others.
With asynchronous file modification, os.remove(filename) will raise FileNotFoundError if requests come in frequently. To overcome this problem, you can create a temporary file with the modified content and then rename it, atomically replacing the old version. This solution works fine for both synchronous and asynchronous cases.
import os, json, uuid

filename = 'data.json'
with open(filename, 'r') as f:
    data = json.load(f)
    data['id'] = 134  # <--- add `id` value.
    # add, remove, modify content

# create a randomly named temporary file to avoid
# interference with other threads/asynchronous requests
tempfile = os.path.join(os.path.dirname(filename), str(uuid.uuid4()))
with open(tempfile, 'w') as f:
    json.dump(data, f, indent=4)

# rename the temporary file, replacing the old file
os.rename(tempfile, filename)
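Note that on Windows, os.rename raises FileExistsError if the destination already exists; os.replace(tempfile, filename) (available since Python 3.3) performs the atomic overwrite on both platforms.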
There are really quite a number of ways to do this, and all of the above are in one way or another valid approaches... Let me add a straightforward proposition. So, assuming your existing JSON file looks like this:
{
    "name": "myname"
}
And you want to bring in this new JSON content (adding the key "id"):
{
    "id": "134",
    "name": "myname"
}
My approach has always been to keep the code extremely readable, with easily traceable logic. So first, we read the entire existing JSON file into memory, assuming you are well aware of your JSON's existing key(s).
import json

# first, get the absolute path to the json file
PATH_TO_JSON = 'data.json'  # assuming same directory (but you can work your magic here with os.)

# read the existing json into memory. you do this to preserve whatever existing data.
with open(PATH_TO_JSON, 'r') as jsonfile:
    json_content = json.load(jsonfile)  # this is now in memory! you can use it outside 'open'
Next, we use the with open() syntax again, this time with the 'w' option. 'w' is a write mode which lets us edit and write new information to the file. Here's the catch that works for us: any existing file with the same target name will be overwritten automatically.
So what we can do now is simply write to the same filename with the new data:
# add the id key-value pair (rmbr that it already has the "name" key value)
json_content["id"] = "134"

with open(PATH_TO_JSON, 'w') as jsonfile:
    json.dump(json_content, jsonfile, indent=4)  # you decide the indentation level
And there you go!
data.json should be good to go for a good old POST request.
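A minimal sketch of that POST, assuming the requests library is available (the endpoint URL below is a placeholder, not a real API):

import json
import requests  # assumed to be installed

with open('data.json') as f:
    payload = json.load(f)

# placeholder URL; substitute the real endpoint
response = requests.post('https://example.com/api/items', json=payload)
print(response.status_code)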
try this script:
with open("data.json") as f:
data = json.load(f)
data["id"] = 134
json.dump(data, open("data.json", "w"), indent = 4)
the result is:
{
    "name": "myname",
    "id": 134
}
Only the arrangement is different. You can solve that by converting the data dict to a list, arranging it as you wish, converting it back, and saving the file, like this:
index_add = 0
with open("data.json") as f:
    data = json.load(f)

data_li = [[k, v] for k, v in data.items()]
data_li.insert(index_add, ["id", 134])
data = {data_li[i][0]: data_li[i][1] for i in range(0, len(data_li))}
json.dump(data, open("data.json", "w"), indent=4)
the result is:
{
    "id": 134,
    "name": "myname"
}
You can add an if condition so the key is not repeated but updated instead, like this:
index_add = 0
n_k = "id"
n_v = 134
with open("data.json") as f:
    data = json.load(f)

if n_k in data:
    data[n_k] = n_v
else:
    data_li = [[k, v] for k, v in data.items()]
    data_li.insert(index_add, [n_k, n_v])
    data = {data_li[i][0]: data_li[i][1] for i in range(0, len(data_li))}
json.dump(data, open("data.json", "w"), indent=4)
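This ordering trick relies on dicts preserving insertion order, which is guaranteed from Python 3.7 onward; the dict rebuild could also be written more simply as dict(data_li).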
This implementation should suffice:
with open(jsonfile, 'r') as file:
    data = json.load(file)

data[id] = value

with open(jsonfile, 'w') as file:
    json.dump(data, file)
A context manager is used for opening the jsonfile.
data holds the updated object, which is dumped into the overwritten jsonfile in 'w' mode.
Not exactly your solution, but it might help some people solving this issue with keys.
I have a list of files in a folder, and I need to make JSON out of it with keys.
After many hours of trying, the solution is simple.
Solution:
import os

async def return_file_names():
    dir_list = os.listdir("./tmp/")
    json_dict = {"responseObj": [{"Key": dir_list.index(value), "Value": value} for value in dir_list]}
    print(json_dict)
    return json_dict
The response looks like this:
{
    "responseObj": [
        {
            "Key": 0,
            "Value": "bottom_mask.GBS"
        },
        {
            "Key": 1,
            "Value": "bottom_copper.GBL"
        },
        {
            "Key": 2,
            "Value": "copper.GTL"
        },
        {
            "Key": 3,
            "Value": "soldermask.GTS"
        },
        {
            "Key": 4,
            "Value": "ncdrill.DRD"
        },
        {
            "Key": 5,
            "Value": "silkscreen.GTO"
        }
    ]
}
