Imgur link scraping from subreddits in Python

So far my code successfully manages to lift the HTML source from the 5 results it picks up when given the name of a subreddit. Now I want to search that for imgur links, whether for an album (containing /a/) or a single image. I then want to grab each link and send it to another class (imgurdl).
What is the best way to do this, given my current code?
from bs4 import BeautifulSoup
import praw
from urllib2 import urlopen
import urllib2
import sys
from urlparse import urljoin
import config
import imgurdl
import requests

cache = []
soup = BeautifulSoup   # note: this binds the class itself, not a parsed document

def reddit_login():
    r = praw.Reddit(username=config.username,   # USER was undefined; assumed to come from config
                    password=config.password,
                    client_id=config.client_id,
                    client_secret=config.client_secret,
                    user_agent=" v0.3")
    print("***********logged in successfully***********")
    return r

def get_category_links(subredditName, r):
    print("Grabbing subreddit...")
    submissions = r.subreddit(subredditName).hot(limit=5)
    print("Grabbing comments...")
    # comments = subred.comments(limit=200)
    for submission in submissions:
        htmlSource = requests.get(submission.url).text
        print(htmlSource)

r = reddit_login()
get_category_links(sys.argv[1], r)

You can get the URL from PRAW and check whether it is an imgur link within the loop itself, then send it to the appropriate function. That way there is no need to go through the HTML source at all.
for submission in submissions:
    link = submission.url
    if "imgur.com/a/" in link:
        # Send to the imgur album downloader
        pass
    elif link.endswith(".jpg") or link.endswith(".png"):
        # Send to the direct image downloader
        pass
    elif "imgur.com/" in link:
        # Send to the single-image imgur downloader
        pass
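The imgurdl class from the question isn't shown, so its interface can't be assumed. As a rough sketch, the direct-image branch might hand the link to a hypothetical helper like the one below (download_image is an assumed name, not part of imgurdl or PRAW):

import os
import requests

def download_image(link, dest_dir="downloads"):
    # hypothetical helper: fetch a direct image URL and save it to disk
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    filename = link.rsplit("/", 1)[-1]
    resp = requests.get(link)
    resp.raise_for_status()
    with open(os.path.join(dest_dir, filename), "wb") as f:
        f.write(resp.content)

The imgur.com/a/ branch would need to enumerate the album's images first (for example via the imgur API) before calling a helper like this for each one.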

Related

How to use cookies on a session?

I have a script that needs to find elements in the HTML, but when it accesses the main page, this page shows up instead: https://gyazo.com/84d0e5b7a73c97db5b780f18d0ba3f89
My questions are these:
How can I bypass it?
How can I get cookies via cfscrape.create_scraper() or requests.session()?
My script:
import datetime
import bs4
import cfscrape
s = cfscrape.create_scraper()
url = str(input("["+str(datetime.datetime.now())+"]"+" [INPUT] > URL # "))
product = s.get(url, headers=headers, allow_redirects=True)  # headers is assumed to be defined elsewhere in the script
soup = bs4.BeautifulSoup(product.text,"html.parser")
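For the second question: cfscrape.create_scraper() returns a requests.Session subclass, so cookies set by earlier responses are stored on the scraper and re-sent automatically on later requests. A minimal sketch (the URLs are placeholders):

import cfscrape

s = cfscrape.create_scraper()
first = s.get("https://example.com/")          # placeholder URL; cookies from the response are stored on the session
print(s.cookies.get_dict())                    # inspect the cookies collected so far
second = s.get("https://example.com/page")     # the same cookies are sent automatically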

Retrieving form results with requests

I want to submit a multipart/form-data form that sets the input for a simulation on TRILEGAL, and then download the file available from the redirected page.
I studied the documentation of requests, urllib, Grab, mechanize, etc., and it seems that with mechanize my code would be:
from mechanize import Browser
browser = Browser()
browser.open("http://stev.oapd.inaf.it/cgi-bin/trilegal")
browser.select_form(nr=0)
browser['gal_coord'] = ["2"]
browser['eq_alpha'] = ["277.981111"]
browser['eq_delta'] = ["-19.0833"]
response = browser.submit()
content = response.read()
However, I could not test it because mechanize is not available in Python 3.
So I tried requests:
import requests
url = 'http://stev.oapd.inaf.it/cgi-bin/trilegal'
values = {'gal_coord': "2",
          'eq_alpha': "277.981111",
          'eq_delta': "-19.0833",
          'field': " 0.047117",
          }
r = requests.post(url, files = values)
but I can't figure out how to get to the results page. If I do
r.content
it displays the content of the form I had just submitted, whereas if you open the actual website and click 'submit', you see a new page (following the method="post" action="./trilegal_1.6").
How can I get to that new page with requests (i.e. follow through to the page that opens up when I click the submit button), and then follow the link on the results page to retrieve the results file ("The results will be available after about 2 minutes at THIS LINK.")?
If you can point me to any other tool that could do the job I would be really grateful - I spent hours looking through SO for something that could help solve this problem.
Thank you!
Chris
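For what it's worth, a rough requests-only sketch of the same idea as the mechanize answer below: post the form fields to the form's action URL, pull the relative link out of the returned page, wait, and download it. Whether the endpoint accepts a plain urlencoded POST (data=) with only these fields is an assumption; the real form may require more fields, as the second answer below suggests.

import time
import requests
from bs4 import BeautifulSoup
from urlparse import urljoin   # Python 2; use urllib.parse on Python 3

# the form's action is "./trilegal_1.6" relative to the cgi-bin page (see above)
post_url = 'http://stev.oapd.inaf.it/cgi-bin/trilegal_1.6'
values = {'gal_coord': "2",
          'eq_alpha': "277.981111",
          'eq_delta': "-19.0833",
          'field': " 0.047117"}

r = requests.post(post_url, data=values)    # assumption: a urlencoded POST is accepted
soup = BeautifulSoup(r.text, 'html.parser')
link = soup.find('a')['href']               # relative link to the results file
results_url = urljoin(r.url, link)

time.sleep(120)                             # the results are ready after about 2 minutes
with open('results.dat', 'wb') as f:
    f.write(requests.get(results_url).content)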
Here is a working solution for Python 2.7:
from mechanize import Browser
from urllib import urlretrieve # for download purpose
from bs4 import BeautifulSoup
browser = Browser()
browser.open("http://stev.oapd.inaf.it/cgi-bin/trilegal")
browser.select_form(nr=0)
browser['gal_coord'] = ["2"]
browser['eq_alpha'] = ["277.981111"]
browser['eq_delta'] = ["-19.0833"]
response = browser.submit()
content = response.read()
soup = BeautifulSoup(content, 'html.parser')
base_url = 'http://stev.oapd.inaf.it'
# fetch the relative url from the page source and append it to the base url
link = soup.findAll('a')[0]['href'].split('..')[1]
url = base_url + str(link)
filename = 'test.dat'
# now download the file
urlretrieve(url, filename)
Your file will be downloaded as test.dat. You can then open it with the appropriate program.
I'm posting a separate answer because a comment would be too cluttered. Thanks to @ksai, this works in Python 2.7:
import re
import time
from mechanize import Browser
browser = Browser()
browser.open("http://stev.oapd.inaf.it/cgi-bin/trilegal")
browser.select_form(nr=0)
#set appropriate form contents
browser['gal_coord'] = ["2"]
browser['eq_alpha'] = "277.981111"
browser['eq_delta'] = "-19.0833"
browser['field'] = " 0.047117"
browser['photsys_file'] = ["tab_mag_odfnew/tab_mag_lsst.dat"]
browser["icm_lim"] = "3"
browser["mag_lim"] = "24.5"
response = browser.submit()
# wait 1 min while results are prepared
time.sleep(60)
# select the appropriate url
url = 'http://stev.oapd.inaf.it/' + str(browser.links()[0].url[3:])
# download the results file
browser.retrieve(url, 'test1.dat')
Thank you very much!
Chris

Python, web scraping, and writing a file

I am using 3 modules in this program, and I don't know if what I'm trying to do is even possible! I want to scrape some data off of Twitter and write it to a text file using Python. Can somebody please guide me and tell me why my code isn't writing the scraped data?
import urllib
import urllib.request
from os import path
from bs4 import BeautifulSoup

# here I define the url, I request the page, create my soup
theurl = "https://twitter.com/realDonaldTrump"
thepage = urllib.request.urlopen(theurl)
soup = BeautifulSoup(thepage, "html.parser")

def create_file(dest):
    """
    Creates a file for the user to write data in!
    :param dest:
    :return:
    """
    ## FileName == Month_Day_Year
    name = 'Data Scraped.txt'
    if not (path.isfile(dest + name)):
        f = open(dest + name, "w")
        f.write(soup.title.text)
        f.close()

if __name__ == '__main__':
    destination = 'C:\\Users\\edwin\\' \
                  'Desktop\\WebScrappin\\'
    create_file(destination)
    print("Your file has been created!!")
You're only writing the title of the document that you received:
f.write(soup.title.text)
Instead of scraping (which is against their ToS), you should gather your data from their RESTful API, or use a library like Twython.
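A minimal sketch of the Twython route, with placeholder credentials from a Twitter developer account (the screen name and output filename are taken from the question):

from twython import Twython

# placeholder credentials obtained from the Twitter developer portal
twitter = Twython('APP_KEY', 'APP_SECRET', 'OAUTH_TOKEN', 'OAUTH_TOKEN_SECRET')

# fetch recent tweets through the API instead of scraping the profile HTML
tweets = twitter.get_user_timeline(screen_name='realDonaldTrump', count=20)

with open('Data Scraped.txt', 'w', encoding='utf-8') as f:
    for tweet in tweets:
        f.write(tweet['text'] + '\n')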

Searching Google Images from Python

#!/usr/bin/env python
import urllib
import mechanize
from bs4 import BeautifulSoup
from urlparse import urlparse

def getPic(search):
    search = search.replace(" ", "%20")
    try:
        browser = mechanize.Browser()
        browser.set_handle_robots(False)
        browser.addheaders = [('User-Agent', 'Mozilla')]
        htmltext = browser.open("https://www.google.com/search?site=&tbm=isch&source=hp&biw=1855&bih=990&q=" + search + "&oq=" + search)
        img_url = []
        formatted_images = []
        soup = BeautifulSoup(htmltext)
        results = soup.findAll("a")
        for r in results:
            try:
                if "imgres?imgurl" in r['href']:
                    img_url.append(r['href'])
            except:
                a = 0
        for im in img_url:
            refer_url = urlparse(str(img_url[0]))
            return refer_url.query.split("&")[0].replace("imgurl=", "")
        return formatted_images
    except:
        print "error"

print getPic("occupy wall street")
Instead of getting the link to an image as output, I'm getting "[]". Can someone figure out what the problem with my code is?
Google sends the "imgres?imgurl" links only to browsers with JavaScript enabled, but mechanize.Browser() behaves like a browser without JavaScript.
Turn off JavaScript in your own browser and look at the HTML Google sends you.

How can I get the final redirect URL, including the path, in Python? (urllib2.urlopen().geturl() isn't doing it) [duplicate]

Python's urllib2 follows 3xx redirects to get the final content. Is there a way to make urllib2 (or some other library such as httplib2) also follow meta refreshes? Or do I need to parse the HTML manually for the refresh meta tags?
Here is a solution using BeautifulSoup and httplib2 (and certificate-based authentication):
import BeautifulSoup
import httplib2

def meta_redirect(content):
    soup = BeautifulSoup.BeautifulSoup(content)
    result = soup.find("meta", attrs={"http-equiv": "Refresh"})
    if result:
        wait, text = result["content"].split(";")
        if text.strip().lower().startswith("url="):
            url = text.strip()[4:]
            return url
    return None

def get_content(url, key, cert):
    h = httplib2.Http(".cache")
    h.add_certificate(key, cert, "")
    resp, content = h.request(url, "GET")
    # follow the chain of redirects
    while meta_redirect(content):
        resp, content = h.request(meta_redirect(content), "GET")
    return content
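Hypothetical usage, with a placeholder URL and placeholder client key/certificate paths:

content = get_content("https://example.com/start", "client.key", "client.crt")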
A similar solution using the requests and lxml libraries. It also does a simple check that the thing being tested is actually HTML (a requirement in my implementation), and it can capture and use cookies via the requests library's sessions (sometimes necessary if redirection plus cookies are being used as an anti-scraping mechanism).
import magic
import mimetypes
import requests
from lxml import html
from urlparse import urljoin

def test_for_meta_redirections(r):
    mime = magic.from_buffer(r.content, mime=True)
    extension = mimetypes.guess_extension(mime)
    if extension == '.html':
        html_tree = html.fromstring(r.text)
        attr = html_tree.xpath("//meta[translate(@http-equiv, 'REFSH', 'refsh') = 'refresh']/@content")[0]
        wait, text = attr.split(";")
        if text.lower().startswith("url="):
            url = text[4:]
            if not url.startswith('http'):
                # Relative URL, adapt
                url = urljoin(r.url, url)
            return True, url
    return False, None

def follow_redirections(r, s):
    """
    Recursive function that follows meta refresh redirections if they exist.
    """
    redirected, url = test_for_meta_redirections(r)
    if redirected:
        r = follow_redirections(s.get(url), s)
    return r
Usage:
s = requests.session()
r = s.get(url)
# test for and follow meta redirects
r = follow_redirections(r, s)
OK, it seems no library supports it, so I have been using this code:
import urllib2
import urlparse
import re

def get_hops(url):
    redirect_re = re.compile('<meta[^>]*?url=(.*?)["\']', re.IGNORECASE)
    hops = []
    while url:
        if url in hops:
            url = None
        else:
            hops.insert(0, url)
            response = urllib2.urlopen(url)
            if response.geturl() != url:
                hops.insert(0, response.geturl())
            # check for redirect meta tag
            match = redirect_re.search(response.read())
            if match:
                url = urlparse.urljoin(url, match.groups()[0].strip())
            else:
                url = None
    return hops
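Hypothetical usage (the URL is a placeholder); get_hops returns the URLs visited, with the final one first:

hops = get_hops("http://example.com/")
final_url = hops[0]   # the last URL reached after HTTP and meta-refresh redirects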
If you don't want to use bs4, you can use lxml like this:
from lxml.html import soupparser

def meta_redirect(content):
    root = soupparser.fromstring(content)
    result_url = root.xpath('//meta[@http-equiv="refresh"]/@content')
    if result_url:
        result_url = str(result_url[0])
        urls = result_url.split('URL=') if len(result_url.split('url=')) < 2 else result_url.split('url=')
        url = urls[1] if len(urls) >= 2 else None
    else:
        return None
    return url
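Hypothetical usage with requests (not part of the original answer; the URL is a placeholder):

import requests

resp = requests.get("http://example.com/")
next_url = meta_redirect(resp.text)   # None if the page has no meta refresh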
Use BeautifulSoup or lxml to parse the HTML.
