How to process a huge Apache log file using Python

I have a CSV file that contains a column called click_id, and I want to use this click_id to search a large Apache log file (around 3GB) for the corresponding log entries. When a matching log entry is found, I need to extract the user agent and other information from the log entry. I would also like to group and count similar log entries and write the results to another CSV file.
What is the most efficient and reliable way to accomplish this task in Python? What is the best way to handle the large size of the log file and ensure that the script runs efficiently without running out of memory or causing other performance issues?
This is what I have tried, but it has been running for 3 days and still has not finished.
import csv
from collections import defaultdict
from user_agents import parse

clickid_list = []
device_list = []

with open('data.csv', 'r') as file:
    reader = csv.reader(file)
    for row in reader:
        # check if click_id column is not blank or null
        if row[29] != "" and row[29] != "null" and row[29] != "click_id":
            clickid_list.append(row[29])

matched_lines_count = defaultdict(int)

def log_file_generator(filename, chunk_size=200 * 1024 * 1024):
    with open(filename, 'r') as file:
        while True:
            chunk = file.readlines(chunk_size)
            if not chunk:
                break
            yield chunk

for chunk in log_file_generator('data.log'):
    for line in chunk:
        for gclid in clickid_list:
            if gclid in line:
                string = "'" + str(line) + "'"
                user_agent = parse(string)
                device = user_agent.device.family
                device_brand = user_agent.device.brand
                device_model = user_agent.device.model
                os = user_agent.os.family
                os_version = user_agent.os.version
                browser = user_agent.browser.family
                browser_version = user_agent.browser.version
                if device in matched_lines_count:
                    matched_lines_count[device]["count"] += 1
                    print(matched_lines_count[device]["count"])
                else:
                    matched_lines_count[device] = {"count": 1, "os": os, "os_version": os_version, "browser": browser, "browser_version": browser_version, "device_brand": device_brand, "device_model": device_model}

# sort by count
sorted_matched_lines_count = sorted(matched_lines_count.items(), key=lambda x: x[1]['count'], reverse=True)

with open("test_op.csv", "a", newline="") as file:
    writer = csv.writer(file)
    writer.writerows([["Device", "Count", "OS", "OS version", "Browser", "Browser version", "device_brand", "device model"]])
    for line, count in sorted_matched_lines_count:
        # if count['count'] >= 20:
        #     print(f"Matched Line: {line} | Count: {count['count']} | OS: {count['os']}")
        # write the data to a CSV file
        writer.writerow([line, count['count'], count['os'], count['os_version'], count['browser'], count['browser_version'], count['device_brand'], count['device_model']])
Example of log:
127.0.0.1 - - [03/Nov/2022:06:50:20 +0000] "GET /access?click_id=12345678925455 HTTP/1.1" 200 39913 "-" "Mozilla/5.0 (Linux; Android 11; SM-A107F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Mobile Safari/537.36"
127.0.0.1 - - [03/Nov/2022:06:50:22 +0000] "GET /access?click_id=123456789 HTTP/1.1" 200 39914 "-" "Mozilla/5.0 (Linux; Android 11; SM-A705FN) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36"
Result expected.
I am new to Python, so any code examples or pointers to relevant libraries or tools would be greatly appreciated.
Thank you!

You can use PySpark, since you have big data; once the data has been reduced you can switch to Pandas. The PySpark API is similar to the Pandas one.
# pandas
import pandas as pd
pd.read_csv('P00000001-ALL.csv')
# pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[1]") \
    .appName("SparkByExamples.com") \
    .getOrCreate()
df = spark.read.csv("/tmp/resources/zipcodes.csv")
df.printSchema()
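For the click_id task from the question specifically, a minimal PySpark sketch could look like the following. The file paths, the click_id column name, and the regexes for the Apache log layout are assumptions based on the question, so adjust them to your data:
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[*]").appName("clickid-join").getOrCreate()

# click ids from the CSV (assumes a header row with a click_id column)
clicks = spark.read.csv("data.csv", header=True).select("click_id").dropna()

# raw log lines, with click_id and the quoted user agent pulled out by regex
logs = spark.read.text("data.log").select(
    F.regexp_extract("value", r"click_id=(\w+)", 1).alias("click_id"),
    F.regexp_extract("value", r'"([^"]*)"$', 1).alias("user_agent"),
)

# keep only log lines whose click_id appears in the CSV, then count per user agent
matched = logs.join(clicks, on="click_id", how="inner")
matched.groupBy("user_agent").count().orderBy(F.desc("count")) \
    .write.csv("matched_counts_csv", header=True)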

Related

UserWarning: Calling close() on already closed file. warn("Calling close() on already closed file.")

This error apparently stems from xlsxwriter. I'm not sure what line of my code it's coming from, because my editor (Visual Studio 2019) crashes every time I try to debug. I get this warning on a laptop while on a VPN and remote desktop connection; if I run the same code from my remote machine I don't get it. The warning doesn't seem to affect the output, because the script finishes and saves successfully. But how do I get rid of it?
My Code:
import requests
from bs4 import BeautifulSoup
import pandas as pd
from pandas import ExcelWriter
from datetime import datetime
import os

# set the headers as a browser
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}

# set up file name
file_path = r"C:\Users\jpilbeam"
excel_file = 'bargetraffic' + str(datetime.now().strftime('_%m_%d_%Y')) + '.xlsx'
excel_file_full = os.path.join(file_path, excel_file)

lockName = ['Dresden Island Lock', 'Brandon Rd Lock', 'Lockport Lock']
lockNo = ['02', '03', '04']

results = []
for lock in lockNo:
    url = 'https://corpslocks.usace.army.mil/lpwb/xml.lockqueue?in_river=IL&in_lock=' + lock
    #print (url)
    link = requests.get(url).text
    soup = BeautifulSoup(link, 'lxml')
    # get elements of row tags
    rows = soup.find_all('row')
    sheet = pd.DataFrame()
    for row in rows:
        name = row.find('vessel_name').text.strip()
        no = row.find('vessel_no').text.strip()
        dir = row.find('direction').text.strip()
        barno = row.find('num_barges').text.strip()
        arr = row.find('arrival_date').text.strip()
        try:
            end = row.find('end_of_lockage').text.strip()
        except:
            result = ''
        df = pd.DataFrame([[name, no, dir, barno, arr, end]], columns=['Name', 'Vessel No.', 'Direction', 'Number of Barges', 'Arrival', 'End of Lockage'])
        sheet = sheet.append(df, sort=True).reset_index(drop=True)
    results.append(sheet)

def save_xls(list_dfs, xls_path):
    with ExcelWriter(xls_path) as writer:
        for n, df in enumerate(list_dfs):
            df.to_excel(writer, '%s' % lockName[n], index=False,)
        writer.save()

save_xls(results, excel_file_full)
print('----done----')
Error:
C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\lib\site-packages\xlsxwriter\workbook.py:329: UserWarning: Calling close() on already closed file.
warn("Calling close() on already closed file.")
I put the save part in a try except block according to this help doc, but I must be doing it wrong.
while True:
    try:
        def save_xls(list_dfs, xls_path):
            with ExcelWriter(xls_path) as writer:
                for n, df in enumerate(list_dfs):
                    df.to_excel(writer, '%s' % lockName[n], index=False,)
                writer.save()
        save_xls(results, excel_file_full)
    except xlsxwriter.exceptions.FileCreateError as e:
        print(e)
print('----done----')
The warning occurs because you are calling to_excel() inside a with statement, which effectively closes/saves the file once it leaves that scope. You then call save(), which tries to close the file again; since it is already closed, you get a warning (not an error or exception).
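In other words, let the with block do the closing and drop the extra save() call. A minimal sketch of the helper without it (sheet_names here is just the lockName list from the question):
from pandas import ExcelWriter

def save_xls(list_dfs, xls_path, sheet_names):
    # the with block saves and closes the workbook on exit, so no writer.save() is needed
    with ExcelWriter(xls_path) as writer:
        for n, df in enumerate(list_dfs):
            df.to_excel(writer, sheet_name=sheet_names[n], index=False)

# usage with the question's variables:
# save_xls(results, excel_file_full, lockName)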

How to write system info to a spreadsheet in Python

I am trying to write system info to a spreadsheet, but when I try to use my variables they come out blank.
import csv
import os
import linecache
os.system('getmac -v > mac.txt')
os.system("wmic bios get serialnumber > serial.txt")
os.system("wmic computersystem get model > model.txt")
os.system("hostname > hostname.txt")
os.system("ipconfig > ip.txt")
open('ip1.txt','w').writelines([line for line in open('ip.txt')if 'IPv4' in line])
open('mac1.txt','w').writelines([line for line in open('mac.txt')if 'Wi-Fi' in line])
open('mac2.txt','w').writelines([line for line in open('mac.txt')if 'Ethernet' in line])
serial = linecache.getline('serial.txt', 3)
model = linecache.getline('model.txt', 3)
mac = open("mac.txt","r")
IP = open("ip1.txt","r")
mac1 = open("mac1.txt","r")
mac2 = open("mac2.txt","r")
hostname = open("hostname.txt","r")
Rmac = mac.read()
Rip = IP.read()
Rmac1 = mac1.read()
Rmac2 = mac2.read()
Rhostname = hostname.read()
myData = [[model]]
myFile = open('example2.csv', 'w')
with myFile:
    writer = csv.writer(myFile)
    writer.writerows(myData)
This just will not write the information to the spreadsheet. What am I doing wrong? I am very new to programming, by the way.
You don't need intermediary files; why not call your commands and write their output to your CSV immediately, without all that back-and-forth?
import csv
import subprocess

# get the model
model = subprocess.check_output(["WMIC", "computersystem", "get", "model"],
                                universal_newlines=True).strip().rsplit("\n", 1)[1]
# get the serial
serial = subprocess.check_output(["WMIC", "bios", "get", "serialnumber"],
                                 universal_newlines=True).strip().rsplit("\n", 1)[1]
# get the host name
hostname = subprocess.check_output(["hostname"], universal_newlines=True).strip()
# get WMI output for all addresses
ips = subprocess.check_output(["WMIC", "NICCONFIG", "where", "IPEnabled=true",
                               "get", "IPAddress"],
                              universal_newlines=True).strip().split("\n\n")[1:]
# post-process to get the addresses only
ips = [ip.split(",")[0].strip('"{} ') for ip in ips]
# etc.

with open("example2.csv", "wb") as f:  # open your CSV for writing
    writer = csv.writer(f)  # create a writer
    # you didn't write a header but let's add it in
    writer.writerow(["model", "serial", "hostname", "ips"])  # etc., you get the picture...
    writer.writerow([model, serial, hostname, ",".join(ips)])  # add other columns, too
And you'll get a nice example2.csv containing:
model,serial,hostname,ips
Your Model,Your Serial,Your-Hostname,List.Of.IP.Addresses
Do the same for the other fields and you're done.
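For example, the MAC addresses could be collected the same way. A hedged sketch that reuses the getmac -v call from the question and simply picks out anything shaped like a MAC address on the "Wi-Fi" and "Ethernet" lines (Windows-only, English output assumed):
import re
import subprocess

mac_output = subprocess.check_output(["getmac", "-v"], universal_newlines=True)
mac_pattern = r"[0-9A-F]{2}(?:-[0-9A-F]{2}){5}"  # e.g. 00-1A-2B-3C-4D-5E
wifi_macs = [m for line in mac_output.splitlines() if "Wi-Fi" in line
             for m in re.findall(mac_pattern, line)]
eth_macs = [m for line in mac_output.splitlines() if "Ethernet" in line
            for m in re.findall(mac_pattern, line)]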

TypeError: Incorrect padding (b64decode Python)

I am trying to scrape a site (Russian language, Cyrillic) and save all of the content to a CSV, but I get this error:
Traceback (most recent call last):
File "/Users/kr/PycharmProjects/education_py/credit_parser.py", line 30, in
base64.b64decode(listing_title[0].encode('utf-8')),
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/base64.py", line 76, in b64decode
raise TypeError(msg)
TypeError: Incorrect padding
My code
# coding: utf8
import requests
from lxml.html import fromstring
import csv
import base64

headers = {
    'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/601.6.17 (KHTML, like Gecko) Version/9.1.1 Safari/601.6.17"
}

csvfile = open('credit-listing.csv', 'wb')
writer = csv.writer(csvfile, quotechar='|', quoting=csv.QUOTE_ALL)

i = 1
while i < 2:
    url = requests.get("http://credit-board.ru/index.php?page=search&sCategory=116&iPage={}".format(i), headers=headers)
    page_html = fromstring(url.content)
    all_listings = page_html.xpath('//*[@id="listing-card-list"]/li')
    listings_list = []
    for listing in all_listings:
        listing_urls = listing.xpath('./div/div/div/div/a/@href')[0]
        listing_request = requests.get(listing_urls)
        listing_html = fromstring(listing_request.content)
        listing_title = listing_html.xpath('//*[@id="item-content"]/h1/strong/text()')
        listing_text = listing_html.xpath('//*[@id="description"]/p[1]/text()')
        listing_meta = listing_html.xpath('//*[@id="custom_fields"]/div/div/text()')
        listings_list.append([listing_title, listing_text, listing_meta])
        writer.writerow([
            base64.b64decode(listing_title[0].encode('utf-8')),
            base64.b64decode(listing_text[0].encode('utf-8')),
            base64.b64decode(listing_meta[0].encode('utf-8'))
        ])
    i += 1
    print i
You should use b64encode instead of b64decode. If you do need to decode, pad the string to a multiple of 4 first, like this:
try:
    if divmod(len(field), 4)[1] != 0:
        field += "=" * (4 - divmod(len(field), 4)[1])
    # decode field here
except Exception, e:
    print e
Here field is the base64-encoded string.
Your scraped text was never base64-encoded in the first place, so it has to be encoded before it can be decoded; simply calling field.encode('utf-8') is wrong, and you should not try to decode data produced by other tools. Base64-encoded items also follow a URL-safe character pattern.
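Putting that padding idea into a self-contained helper (a sketch written for Python 3, even though the question uses Python 2; the Cyrillic round-trip only demonstrates that missing padding is what b64decode complains about):
import base64

def pad_b64(field):
    # restore any '=' padding the base64 string has lost
    missing = len(field) % 4
    if missing:
        field += "=" * (4 - missing)
    return field

encoded = base64.b64encode("тест".encode("utf-8")).decode("ascii").rstrip("=")  # simulate lost padding
print(base64.b64decode(pad_b64(encoded)).decode("utf-8"))  # prints: тест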

Alphabetically sorting URLs to Download Image

I'm having an issue with sorting URLs. The .jpg files end in "xxxx-xxxx.jpg", and the second set of characters needs to be sorted in alphabetical order. Thus far I've only been able to sort on the first set of characters alphabetically (which is not what is needed).
For instance:
http://code.google.com/edu/languages/google-python-class/images/puzzle/p-babf-bbac.jpg
is preceding
http://code.google.com/edu/languages/google-python-class/images/puzzle/p-babh-bajc.jpg
when, sorted by that second key, bajc should come before bbac. Here is my code:
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import sys
import requests

"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""

def url_sort_key(url):
    print url[-8:]

# Extract the puzzle urls from inside a logfile
def read_urls(filename):
    """Returns a list of the puzzle urls from the given log file,
    extracting the hostname from the filename itself.
    Screens out duplicate urls and returns the urls sorted into
    increasing order."""
    # +++your code here+++
    # Use open function to search for the urls containing "puzzle/p"
    # Use a line split to pick out the 6th section of the filename
    # Sort out all repeated urls, and return sorted list
    with open(filename) as f:
        out = set()
        for line in f:
            if re.search("puzzle/p", line):
                url = "http://code.google.com" + line.split(" ")[6]
                print line.split(" ")
                out.add(url)
        return sorted(list(out))

# Complete the download_images function, which takes a sorted
# list of urls and a directory
def download_images(img_urls, dest_dir):
    """Given the urls already in the correct order, downloads
    each image into the given directory.
    Gives the images local filenames img0, img1, and so on.
    Creates an index.html in the directory
    with an img tag to show each local image file.
    Creates the directory if necessary.
    """
    # ++your code here++
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    # Create an index
    index = file(os.path.join(dest_dir, 'index.html'), 'w')
    index.write('<html><body>\n')
    i = 0
    for img_url in img_urls:
        i += 1
        local_name = 'img%d' % i
        print "Retrieving...", local_name
        print local_name
        print dest_dir
        print img_url
        response = requests.get(img_url)
        if response.status_code == 200:
            f = open(os.path.join(dest_dir, local_name + ".jpg"), 'wb')
            f.write(response.content)
            f.close()
        index.write('<img src="%s">' % (local_name + ".jpg"))
    index.write('\n</body></html>\n')
    index.close()

def main():
    args = sys.argv[1:]
    print args
    if not args:
        print ('usage: [--todir dir] logfile ')
        sys.exit(1)
    todir = None
    if args[0] == '--todir':
        todir = args[1]
        del args[0:2]
    img_urls = read_urls(args[0])
    if todir:
        download_images(img_urls, todir)
    else:
        print ('\n'.join(img_urls))

if __name__ == '__main__':
    main()
I think the error lies in the return for the read_urls function, but am not positive.
Given that the urls end in the format
xxxx-yyyy.jpg
and you want to sort them based on the second key, i.e. yyyy:
def read_urls(filename):
    with open(filename) as f:
        s = {el.rstrip() for el in f if 'puzzle' in el}
    return sorted(s, key=lambda u: u[-8:-4])  # u[-13:-9] if need to sort on the first key
For example, with an input file containing
http://localhost/p-xxxx-yyyy.jpg
http://code.google.com/edu/languages/google-python-class/images/puzzle/p-babf-bbac.jpg
http://code.google.com/edu/languages/google-python-class/images/puzzle/p-babh-bajc.jpg
http://localhost/p-xxxx-yyyy.jpg
it produces the list
['http://code.google.com/edu/languages/google-python-class/images/puzzle/p-babh-bajc.jpg',
'http://code.google.com/edu/languages/google-python-class/images/puzzle/p-babf-bbac.jpg']
i.e. bajc comes before bbac.
See the comment in the code, in case you want to sort by the first key (xxxx)
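If the two key parts are not guaranteed to be exactly four characters each, a regex-based sort key is a bit more robust than the fixed slice. A small sketch, assuming the p-xxxx-yyyy.jpg naming from the exercise:
import re

def second_word_key(url):
    # sort by the second word of p-xxxx-yyyy.jpg; fall back to the whole url
    m = re.search(r"-(\w+)-(\w+)\.jpg$", url)
    return m.group(2) if m else url

urls = [
    "http://code.google.com/edu/languages/google-python-class/images/puzzle/p-babf-bbac.jpg",
    "http://code.google.com/edu/languages/google-python-class/images/puzzle/p-babh-bajc.jpg",
]
print(sorted(urls, key=second_word_key))  # p-babh-bajc.jpg sorts first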

How do I fix KeyError while parsing instagram?

Everyone,
I have a small script parsing names on Instagram.
Recently started having this error:
Traceback (most recent call last):
File "/home/jpegcoma/vk/posting_instagram.py", line 361, in <module>
main()
File "/home/jpegcoma/vk/posting_instagram.py", line 293, in main
table_of_content = get_stuf_from_url(urls)
File "/home/jpegcoma/vk/posting_instagram.py", line 64, in get_stuf_from_url
if json.loads(shared_data)["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_private"] == False:
KeyError: 'ProfilePage'
Currently it is running on a server. However, I tried it on my laptop and the script was working there.
Here is the code that does the thing:
import requests
import json
import os
import random
from time import sleep
import time
import re
from io import BytesIO
from PIL import Image
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool

file_name = 'users_names.txt'

def create_folder_photos():
    if os.path.isdir(os.path.join(os.getcwd(), "photos")) == False:
        os.makedirs(os.path.join(os.getcwd(), "photos"))
    else:
        pass

def make_list_of_users_to_scrap(file_name):
    '''Opens file with instagram user_names.
    Every name should be on a new line.
    Prepares full URL for parsing.
    Returns list URLs'''
    path = os.path.join(os.getcwd(), file_name)
    base_url = 'https://www.instagram.com/'
    users_url_dic = []
    with open(path, 'r') as file:
        for name in file:
            users_url_dic.append(base_url + name.rstrip() + '/')
    return users_url_dic

def parsed_data(shared_data):
    '''Get to ["edges"] node in shared_data from instagram'''
    return json.loads(shared_data)['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']["edges"]

def get_stuf_from_url(urls):
    # Open a request session
    with requests.session() as s:
        # Add some headers in case
        s.headers['user-agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
        pool = ThreadPool(5)
        d = {}
        # Go throught all the URLs on instagram
        responce = pool.map(requests.get, urls)
        pool.close()
        pool.join()
        for i in responce:
            c = i.text
            if 30000 < len(c) < 180000:
                # Clean html, take only content of 'sharedData' part
                shared_data = c.split('window._sharedData = ')[1].split(';</script>')[0]
                # Check is accaunt is private
                if json.loads(shared_data)["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_private"] == False:
                    # Go throught all the nodes:
                    # If video - pass.
                    # If photo - take {"Id":"URL"}
                    for node in parsed_data(shared_data)[::]:
                        if node['node']['is_video'] == False:
                            d[node['node']['id']] = node['node']['display_url']
                        else:
                            continue
                else:
                    continue
            else:
                continue
        return d

def check_for_new(new_data_from_request):
    '''Open 'before_log.txt with previous loggs {'id':'url'}
    Check if any new data is presented.
    Write 2 new files:
    "added.txt" - new photos with url from the last time
    "before_log.txt" - updated log with all the ids and urls
    returns dic with added {'id':'url'} photos'''
    # Open a before_log.txt or say that no such file is presented.
    if os.path.isfile(os.path.join(os.getcwd(), 'before_log.txt')):
        with open(os.path.join(os.getcwd(), 'before_log.txt'), mode='r', encoding='utf8') as f_file:
            before_log = json.load(f_file)
    else:
        print('Need to make "before_log.txt" file to use the script!!!')
    # Get new data from "def get_stuf_from_url(urls):"
    after = new_data_from_request
    # Check if any new photos is avaliable
    added = {i: after[i] for i in after if not i in before_log}
    # Add new {key:value} to before_log
    for key, value in after.items():
        if key not in before_log.keys():
            before_log[key] = value
    # Write added and before_log for future use
    with open(os.path.join(os.getcwd(), 'added.txt'), mode='w', encoding='utf8') as add_file:
        add_file.write(json.dumps(added) + '\n')
    with open(os.path.join(os.getcwd(), 'before_log.txt'), mode='w', encoding='utf8') as out_file:
        out_file.write(json.dumps(before_log) + '\n')
    print('We got {} new photos.'.format(len(added)))
    return added

def createFilename(url, name, folder):
    result = re.split(r'.jpg', url)
    slashSplit = result[0].split('/')
    if name == None:
        name = slashSplit[-1]
    ext = "jpg"
    file = '{}{}.{}'.format(folder, name, ext)
    return file

def getImageFast(url, name=None, folder=os.path.join(os.getcwd(), "photos/")):
    '''Download new photos from instagram
    Creates a photos folder'''
    print("Downloading photos.....")
    file = createFilename(url, name, folder)
    r = requests.get(url, stream=True)
    i = Image.open(BytesIO(r.content))
    i.save(file)
I guess the problem is somewhere in here
if json.loads(shared_data)["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_private"] == False:
Some examples of parsed names on instagram:
_nail_ann_
alena.nails.tallinn
alyne_nails
anna_nails_erbil
aquarelle_nailstudio
cantinhodalara_nails
In a shorter version it does work as intended:
urls = 'https://www.instagram.com/_linails_/'
responce = requests.get(urls)
response_text = responce.text
shared_data = response_text.split('window._sharedData = ')[1].split(';</script>')[0]
# print(shared_data)
d = {}
f = json.loads(shared_data)['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']["edges"]
for node in f[::]:
    if node['node']['is_video'] == False:
        d[node['node']['id']] = node['node']['display_url']
    else:
        continue
print(d)
After running it I'm getting all the URLs and ids I need:
{
'2073876006313498489': 'https://scontent-lax3-2.cdninstagram.com/vp/6e5c8c22e54aa0c853ee88db05dc79bf/5E1BBCA4/t51.2885-15/e35/65217639_634723610367271_4450988163128206846_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=107',
'2024498169693824735': 'https://scontent-lax3-2.cdninstagram.com/vp/39188272c2305ed250ad466c7a715b91/5E2F4B15/t51.2885-15/e35/56352792_132736304460754_8293153588685230511_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=110',
'2023266828574689831': 'https://scontent-lax3-2.cdninstagram.com/vp/f313d44c5bd398a8e6b3f04fb7dbb739/5E2BBB71/t51.2885-15/e35/56578225_1055286461334820_1507399846418163801_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=104',
'2016110942668250132': 'https://scontent-lax3-2.cdninstagram.com/vp/349bbf6a920e440a4e71d5b2d149a61b/5E2BB7FE/t51.2885-15/e35/53745148_280247652888437_7055433742029015170_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=105',
'2012783478764415885': 'https://scontent-lax3-2.cdninstagram.com/vp/72dfe2f67b6dc1ea75e2ddd832384475/5E1936CE/t51.2885-15/e35/54512001_2155869857812437_3429908829670998264_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=109',
'2012464856204377926': 'https://scontent-lax3-2.cdninstagram.com/vp/5aefc3a4e047b08dc94366b0723f170d/5E32A5D3/t51.2885-15/e35/54513720_424627718315641_3423874379564248817_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=101',
'2008135031155279090': 'https://scontent-lax3-2.cdninstagram.com/vp/09cc2e7631c115a0131bda6f597dde60/5E1B4C09/t51.2885-15/e35/53156526_1025783867629475_1693464480553968728_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=111',
'2004990756607236359': 'https://scontent-lax3-2.cdninstagram.com/vp/5da04c640d70b52a3e3073667985f8e3/5E2A62EB/t51.2885-15/e35/54266355_225989821600275_560245954300705815_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=103',
'2002388991416431681': 'https://scontent-lax3-2.cdninstagram.com/vp/77bb0bf9878ca2d175dbd51350c1ef03/5E37974D/t51.2885-15/e35/53217305_581829868953428_1147405223061346025_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=108',
'2001312091952564411': 'https://scontent-lax3-2.cdninstagram.com/vp/64326e9675b389a7997ed86980cba7bc/5E30992A/t51.2885-15/e35/54513758_391705221628749_737855016941810571_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=109',
'1999425996532762294': 'https://scontent-lax3-2.cdninstagram.com/vp/4c4a5ee2b0ad46d6e3eeb1a30c1e9130/5E1BC2CA/t51.2885-15/e35/52639028_2494445767266095_4453054116414455580_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=111',
'1993652807341169347': 'https://scontent-lax3-2.cdninstagram.com/vp/d6d8ffef7fd23d1f12b14282d3bc9aca/5E17386F/t51.2885-15/e35/52024250_786523341734970_6491735451376989098_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com&_nc_cat=106'
}
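One way to avoid the crash is to check the parsed JSON before indexing into it: entry_data without a 'ProfilePage' key usually means Instagram returned something other than a profile page (for example a login or rate-limit page served to the server's IP). A hedged sketch that mirrors the question's parsing and skips such responses instead of raising KeyError:
import json

def extract_profile_user(shared_data):
    # return the profile's "user" dict, or None when the response is not a profile page
    data = json.loads(shared_data)
    pages = data.get("entry_data", {}).get("ProfilePage")
    if not pages:
        return None  # login/challenge page, or Instagram changed the layout
    return pages[0].get("graphql", {}).get("user")

# inside the response loop of get_stuf_from_url one could then write:
# user = extract_profile_user(shared_data)
# if user is None or user.get("is_private"):
#     continue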
