Reading a JSON file that has multiple lines - Python

I have a function that I apply to a JSON file. It works if the input looks like this:
import json

def my_function(dictionary):
    # does things
    return new_dictionary

data = """{
    "_id": {
        "$oid": "5e7511c45cb29ef48b8cfcff"
    },
    "description": "some text",
    "startDate": {
        "$date": "5e7511c45cb29ef48b8cfcff"
    },
    "completionDate": {
        "$date": "2021-01-05T14:59:58.046Z"
    },
    "videos": [{"$oid": "5ecf6cc19ad2a4dfea993fed"}]
}"""

info = json.loads(data)
refined = my_function(info)
new_data = json.dumps(refined)
print(new_data)
However, I need to apply it to a whole file, and the input looks like this (there are multiple objects, they are not separated by commas, and they come one after another):
{"_id":{"$oid":"5f06cb272cfede51800b6b53"},"company":{"$oid":"5cdac819b6d0092cd6fb69d3"},"name":"SomeName","videos":[{"$oid":"5ecf6cc19ad2a4dfea993fed"}]}
{"_id":{"$oid":"5ddb781fb4a9862c5fbd298c"},"company":{"$oid":"5d22cf72262f0301ecacd706"},"name":"SomeName2","videos":[{"$oid":"5dd3f09727658a1b9b4fb5fd"},{"$oid":"5d78b5a536e59001a4357f4c"},{"$oid":"5de0b85e129ef7026f27ad47"}]}
How could I do this? I tried opening and reading the file, and using load and dump instead of loads and dumps, but it still doesn't work. Do I need to read and iterate over every line?

You are dealing with the NDJSON (newline-delimited JSON) data format.
You have to read the whole data string, split it into lines, and parse each line as a JSON object, resulting in a list of dictionaries:
import json

def parse_ndjson(data):
    return [json.loads(l) for l in data.splitlines()]

with open('C:\\Users\\test.json', 'r', encoding="utf8") as handle:
    data = handle.read()

dicts = parse_ndjson(data)

for d in dicts:
    new_d = my_function(d)
    print("New dict", new_d)

Related

Python: Identify the duplicate JSON values and generate an output file with sets of duplicate input rows, one row per set of duplicates

I am trying to find duplicated JSON objects in a 30GB jsonlines file.
Given a JSON object A that looks like this:
{
    "data": {
        "cert_index": 691749790,
        "cert_link": "http://ct.googleapis.com/icarus/ct/v1/get-entries?start=691749790&end=691749790",
        "chain": [{...},{...}],
        "leaf_cert": {
            "all_domains": [...],
            "as_der": "MIIFcjCCBFqgAwIBAgISBDD2+d1gP36/+9uUveS...",
            "extensions": {...},
            "fingerprint": "0C:E4:AF:24:F1:AE:B1:09:B0:42:67:CB:F8:FC:B6:AF:1C:07:D6:5B",
            "not_after": 1573738488,
            "not_before": 1565962488,
            "serial_number": "430F6F9DD603F7EBFFBDB94BDE4BBA4EC9A",
            "subject": {...}
        },
        "seen": 1565966152.750253,
        "source": {
            "name": "Google 'Icarus' log",
            "url": "ct.googleapis.com/icarus/"
        },
        "update_type": "PrecertLogEntry"
    },
    "message_type": "certificate_update"
}
How can I generate an output file where each row looks like this:
{"fingerprint":"0C:E4:AF:24:F1:AE:B1:09:B0:42:67:CB:F8:FC:B6:AF:1C:07:D6:5B", "certificates":[A, B, C]}
Here A, B, and C are the full JSON object for each of the duplicates.
You need to use an array with your information, and before adding a new JSON object, check whether the fingerprint is already in the array. For example:
currentFingerprint = myJson['data']['leaf_cert']['fingerprint']
for elem in arrayOfFingerprints:
    if elem['fingerprint'] == currentFingerprint:
        elem['certificates'].append(myJson)
        break
else:
    arrayOfFingerprints.append({'fingerprint': currentFingerprint, 'certificates': [myJson]})
I'm going to assume that you have already read the file and created a list of dicts.
from collections import defaultdict
import json

d = defaultdict(list)
for jobj in file:
    d[jobj['data']['leaf_cert']['fingerprint']].append(jobj)

with open('file.txt', 'w') as out:
    for k, v in d.items():
        json.dump({"fingerprint": k, "certificates": v}, out)
        out.write("\n")
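With a 30GB jsonlines input you may not want to materialize an intermediate list of dicts before grouping. A minimal sketch of feeding the snippet above straight from the file, one line at a time (the grouping dict still ends up holding every object in memory, so this only saves the intermediate list); input.jsonl is a hypothetical file name:
from collections import defaultdict
import json

d = defaultdict(list)
# stream the jsonlines file line by line instead of reading it all at once
with open('input.jsonl') as f:
    for line in f:
        jobj = json.loads(line)
        d[jobj['data']['leaf_cert']['fingerprint']].append(jobj)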

KeyError occurs while opening the JSON txt file and setting it up as a DataFrame

I had code which gave me an empty DataFrame with no saved tweets.
I tried to debug it by putting print(line) under the for line in json_file: and json_data = json.loads(line) lines.
That resulted in a KeyError.
How do I fix it?
Thank you.
list_df = list()

# read the .txt file, line by line, and append the json data in each line to the list
with open('tweet_json.txt', 'r') as json_file:
    for line in json_file:
        print(line)
        json_data = json.loads(line)
        print(line)
        tweet_id = json_data['tweet_id']
        fvrt_count = json_data['favorite_count']
        rtwt_count = json_data['retweet_count']
        list_df.append({'tweet_id': tweet_id,
                        'favorite_count': fvrt_count,
                        'retweet_count': rtwt_count})

# create a pandas DataFrame using the list
df = pd.DataFrame(list_df, columns=['tweet_id', 'favorite_count', 'retweet_count'])
df.head()
Your comment says you're trying to save to a file, but your code kind of says that you're trying to read from a file. Here are examples of how to do both:
Writing to JSON
import json
import pandas as pd

content = {  # This is just dummy data, in the form of a dictionary
    "tweet1": {
        "id": 1,
        "msg": "Yay, first!"
    },
    "tweet2": {
        "id": 2,
        "msg": "I'm always second :("
    }
}

# Write it to a file called "tweet_json.txt" in JSON
with open("tweet_json.txt", "w") as json_file:
    json.dump(content, json_file, indent=4)  # indent=4 is optional, it makes it easier to read
Note the w (as in write) in open("tweet_json.txt", "w"). You're using r (as in read), which doesn't give you permission to write anything. Also note the use of json.dump() rather than json.load(). We then get a file that looks like this:
$ cat tweet_json.txt
{
    "tweet1": {
        "id": 1,
        "msg": "Yay, first!"
    },
    "tweet2": {
        "id": 2,
        "msg": "I'm always second :("
    }
}
Reading from JSON
Let's read the file that we just wrote, using pandas read_json():
import pandas as pd
df = pd.read_json("tweet_json.txt")
print(df)
Output looks like this:
>>> df
          tweet1                tweet2
id             1                     2
msg  Yay, first!  I'm always second :(
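The original error, though, was a KeyError while reading an existing line-delimited file. A minimal sketch of a more defensive line-by-line reader, assuming tweet_json.txt really does contain one JSON object per line; dict.get() returns None instead of raising a KeyError, which makes it easier to see which keys are actually missing:
import json
import pandas as pd

list_df = []
with open('tweet_json.txt', 'r') as json_file:
    for line in json_file:
        line = line.strip()
        if not line:  # skip blank lines
            continue
        json_data = json.loads(line)
        # .get() avoids a KeyError when a key is absent; print(json_data.keys())
        # to see the real key names in your data
        list_df.append({'tweet_id': json_data.get('tweet_id'),
                        'favorite_count': json_data.get('favorite_count'),
                        'retweet_count': json_data.get('retweet_count')})

df = pd.DataFrame(list_df, columns=['tweet_id', 'favorite_count', 'retweet_count'])
print(df.head())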

Is there a way to just grab one subset of json data from a large text file?

I'm looking to pull the "name" fields from a large JSON text file and store them in another file for later, but I'm getting every piece of data that was in my previous JSON file, albeit slightly modified. How do I make it so I only grab the data after the "name": field in my JSON file?
I've tried
names = []
with open('./out.json', 'r') as f:
    data = json.load(f)
    for name in data:
        names.append(data[name])

with open('./names.json', 'w') as f:
    for name in names:
        f.write('%s\r\n' % name)
and I'm getting my exact json file back, with no formatting and u' in front of everything, likely from the json.load(f), but I have no idea how to remedy this.
my text file is formatted like this, if it matters:
{
    "array": [
        {
            "name": "Seranul",
            "id": 5,
            "type": "Paladin",
            "itemLevel": 414,
            "icon": "Paladin-Holy",
            "total": 11107150,
            "activeTime": 2205387,
            "activeTimeReduced": 2205387
        },
        {
            "name": "Contherious",
            "id": 9,
            "type": "Hunter",
            "itemLevel": 412,
            "icon": "Hunter-Marksmanship",
            "total": 51102811,
            "activeTime": 2637303,
            "activeTimeReduced": 2637303
        },
        {
            "name": "Unicorns",
            "id": 17,
            "type": "Priest",
            "itemLevel": null,
            "icon": "Priest",
            "total": 12252005,
            "activeTime": 1768883,
            "activeTimeReduced": 1761797
        },
        ...
        }
    ]
}
I'm expecting to see the corresponding data for each name field, but I'm getting my entire document back.
It looks like your code is ignoring the structure of the JSON data. Specifically, you are iterating through the keys of the JSON dictionary, which is just array, and then appending the value to your names list. This results in the whole array property being put into your names variable.
Here is what I believe you want: iterate through the entries in array and add them to a list, then export that as JSON to another file.
import json

names = []
with open('./out.json', 'r') as f:
    data = json.load(f)
    for entry in data["array"]:
        names.append(entry["name"])

with open('./names.json', 'w') as f:
    f.write(json.dumps(names))
This will result in the following JSON in names.json:
["Seranul", "Contherious", "Unicorns"]

Parse JSON structures in a txt file containing JSON and text structures

I have a txt file with JSON structures. The problem is that the file does not contain only JSON structures, but also raw text such as log lines:
2019-01-18 21:00:05.4521|INFO|Technical|Batch Started|
2019-01-18 21:00:08.8740|INFO|Technical|Got Entities List from 20160101 00:00 :
{
    "name": "1111",
    "results": [{
        "filename": "xxxx",
        "numberID": "7412"
    }, {
        "filename": "xgjhh",
        "numberID": "E52"
    }]
}
2019-01-18 21:00:05.4521|INFO|Technical|Batch Started|
2019-01-18 21:00:08.8740|INFO|Technical|Got Entities List from 20160101 00:00 :
{
    "name": "jfkjgjkf",
    "results": [{
        "filename": "hhhhh",
        "numberID": "478962"
    }, {
        "filename": "jkhgfc",
        "number": "12544"
    }]
}
I read the .txt file, but when trying to parse the JSON structures I get an error:
IN:
import json

with open("data.txt", "r", encoding="utf-8", errors='ignore') as f:
    json_data = json.load(f)
OUT: json.decoder.JSONDecodeError: Extra data: line 1 column 5 (char 4)
I would like to parse the JSON and save it as a CSV file.
A more general solution for parsing a file with JSON objects mixed with other content, making no assumptions about the non-JSON content, is to split the file content into fragments at the curly brackets, start with the first fragment that is an opening curly bracket, and then join the following fragments one by one until the joined string is parsable as JSON:
import json
import re

with open("data.txt", "r", encoding="utf-8", errors='ignore') as f:
    fragments = iter(re.split('([{}])', f.read()))

while True:
    try:
        # skip ahead to the next opening curly bracket
        while True:
            candidate = next(fragments)
            if candidate == '{':
                break
        # keep appending fragments until the accumulated string parses as JSON
        while True:
            candidate += next(fragments)
            try:
                print(json.loads(candidate))
                break
            except json.decoder.JSONDecodeError:
                pass
    except StopIteration:
        break
This outputs:
{'name': '1111', 'results': [{'filename': 'xxxx', 'numberID': '7412'}, {'filename': 'xgjhh', 'numberID': 'E52'}]}
{'name': 'jfkjgjkf', 'results': [{'filename': 'hhhhh', 'numberID': '478962'}, {'filename': 'jkhgfc', 'number': '12544'}]}
This next solution strips out the non-JSON lines and wraps the JSON structures in a single containing structure. This should do the job for you:
import json

with open("data.txt", "r", encoding="utf-8", errors='ignore') as f:
    # drop the log lines, join the rest, and split on blank lines
    cleaned = ''.join([item.strip() if item.strip() != '' else '-split_here-'
                       for item in f.readlines() if '|INFO|' not in item]).split('-split_here-')

# wrap the pieces in a containing JSON structure and parse the result
json_data = json.loads('{"entries":[' + ''.join([entry + ', ' for entry in cleaned])[:-2] + ']}')
Output:
{"entries":[{"name": "1111","results": [{"filename": "xxxx","numberID": "7412"}, {"filename": "xgjhh","numberID": "E52"}]}, {"name": "jfkjgjkf","results": [{"filename": "hhhhh","numberID": "478962"}, {"filename": "jkhgfc","number": "12544"}]}]}
What's going on here?
In the cleaned = ... line, we're using a list comprehension that creates a list of the lines in the file (f.readlines()) that do not contain the string |INFO| and adds the string -split_here- to the list whenever there's a blank line (where .strip() yields '').
Then, we're converting that list of lines (''.join()) into a string.
Finally, we're converting that string (.split('-split_here-')) into a list of strings, separating the JSON structures into their own entries, marked by blank lines in data.txt.
In the json_data = ... line, we're appending a ', ' to each of the JSON structures using a list comprehension.
Then, we convert that list back into a single string, stripping off the last ', ' (''.join(...)[:-2]; [:-2] slices off the last two characters of the string).
We then wrap the string with '{"entries":[' and ']}' to make the whole thing a valid JSON structure, and feed it to json.loads to load your data as a Python object.
You could do one of several things:
On the Command Line, remove all lines where, say, "|INFO|Technical|" appears (assuming this appears in every line of raw text):
sed -i '' -e '/\|INFO\|Technical/d' yourfilename (if on Mac),
sed -i '/\|INFO\|Technical/d' yourfilename (if on Linux).
Move these raw lines into their own JSON fields (see the sketch after the code below).
Use the "text structures" as a delimiter between JSON objects.
Iterate over the lines in the file, saving them to a buffer until you encounter a line that is a text line, at which point parse the lines you've saved as a JSON object.
import re
import json

def is_text(line):
    # returns True if line starts with a date and time in "YYYY-MM-DD HH:MM:SS" format
    line = line.lstrip('|')  # you said some lines start with a leading |, remove it
    return re.match(r"^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})", line)

json_objects = []

with open("data.txt") as f:
    json_lines = []
    for line in f:
        if not is_text(line):
            json_lines.append(line)
        else:
            # if there's multiple text lines in a row json_lines will be empty
            if json_lines:
                json_objects.append(json.loads("".join(json_lines)))
                json_lines = []

# we still need to parse the remaining object in json_lines
# if the file doesn't end in a text line
if json_lines:
    json_objects.append(json.loads("".join(json_lines)))

print(json_objects)
Repeating the logic in the last few lines is a bit ugly, but you need to handle the case where the last line in your file is not a text line, so when you're done with the for loop you need to parse the last object sitting in json_lines if there is one.
I'm assuming there's never more than one JSON object between text lines and also my regex expression for a date will break in 8,000 years.
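Option 2 above ("move these raw lines into their own JSON fields") is only described in words. Here is a minimal, hypothetical sketch of it, assuming the raw text lines always start with a timestamp and should be kept as an extra log_lines field on the JSON object that follows them:
import json
import re

def is_log_line(line):
    # assumption: raw text lines start with a "YYYY-MM-DD HH:MM:SS" timestamp
    return re.match(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}", line.lstrip('|'))

records = []
log_buffer, json_buffer = [], []
with open("data.txt") as f:
    for line in f:
        if is_log_line(line):
            log_buffer.append(line.strip())
            continue
        json_buffer.append(line)
        try:
            obj = json.loads("".join(json_buffer))  # incomplete objects simply fail to parse
        except json.JSONDecodeError:
            continue
        obj["log_lines"] = log_buffer  # keep the raw text alongside the parsed JSON
        records.append(obj)
        log_buffer, json_buffer = [], []

print(json.dumps(records, indent=4))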
You could count curly brackets in your file to find the beginning and end of each JSON structure, and store the parsed structures in a list, here found_jsons.
import json

with open("data.txt") as f:
    content = f.read()

open_chars = 0
saved_content = []
found_jsons = []

for i in content.splitlines():
    open_chars += i.count('{')
    if open_chars:
        saved_content.append(i)
    open_chars -= i.count('}')
    if open_chars == 0 and saved_content:
        found_jsons.append(json.loads('\n'.join(saved_content)))
        saved_content = []

for i in found_jsons:
    print(json.dumps(i, indent=4))
Output
{
    "results": [
        {
            "numberID": "7412",
            "filename": "xxxx"
        },
        {
            "numberID": "E52",
            "filename": "xgjhh"
        }
    ],
    "name": "1111"
}
{
    "results": [
        {
            "numberID": "478962",
            "filename": "hhhhh"
        },
        {
            "number": "12544",
            "filename": "jkhgfc"
        }
    ],
    "name": "jfkjgjkf"
}
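The question also asks about saving the parsed data as a CSV file, which none of the answers cover. A minimal sketch, assuming the parsed objects are in found_jsons as above and that the desired layout (an assumption) is one CSV row per entry in results, with the parent name repeated:
import csv

with open("output.csv", "w", newline="") as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=["name", "filename", "numberID"])
    writer.writeheader()
    for obj in found_jsons:
        for result in obj.get("results", []):
            writer.writerow({
                "name": obj.get("name"),
                "filename": result.get("filename"),
                # one record uses "number" instead of "numberID", so fall back to it
                "numberID": result.get("numberID") or result.get("number"),
            })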

Python: modify the structure of JSON (to be accepted by BQ) and save it

I would like to ask if there is an easy way to modify JSON by using Python?
I have found a somewhat relevant topic, How to update json file with python, but could not figure out the solution to my current issue.
Currently, the JSON looks like this:
{
    "X": [
        {
            "sample_topic_x": "sample_content_x_1",
            ...
        }
        {
            "sample_topic_x": "sample_content_x_2",
            ...
        }
        ......
    ]
    "Y": [
        {
            "sample_topic_y": "sample_content_y_1",
            ...
        }
        {
            "sample_topic_y": "sample_content_y_2",
            ...
        }
        ......
    ]
}
Required (to be accepted by BQ): remove "Y" and keep only "X", in this format:
{"sample_topic_x":"sample_content_x_1",.....}
{"sample_topic_x":"sample_content_x_2",.....}
{"sample_topic_x":"sample_content_x_3",.....}
Any relevant documentation, topics?
P.S. Update 1.0:
import json

json_path = r'C:\XXX\exportReport.json'

def updateJsonFile():
    jsonFile = open(json_path, "r")  # Open the JSON file for reading
    data = json.load(jsonFile)       # Read the JSON into the buffer
    jsonFile.close()                 # Close the JSON file

updateJsonFile()
Solution:
import json

json_path = r'C:\XXX\exportReport.json'
output_path = r'C:\XXX\your_output_file.txt'

with open(json_path) as f:
    data = json.loads(f.read())

# Opening the output file in append mode
# Note: the output file is not JSON, as the required format is not valid JSON
with open(output_path, "a+") as op:
    for element in data.get('X'):
        op.write(json.dumps(element) + "\n")
Explanation:
Load the input JSON file using json.loads and store the result in data. The output file will be a plain text file rather than a JSON file, because the required output format is not valid JSON, so use a .txt file for the output. To get the inner element X, which is a list of dictionaries, use data.get('X'), which returns that list. Iterate over it and write json.dumps() of each element to the output file, one element per line.
Given the C:\XXX\exportReport.json input shown above, the result is:
C:\XXX\your_output_file.txt
{"sample_topic_x":"sample_content_x_1",.....}
{"sample_topic_x":"sample_content_x_2",.....}
{"sample_topic_x":"sample_content_x_3",.....}
You need to extract the parent data first, assign it to a variable, and then search for the "X" data in that variable.
