How to use python urllib to search in search box? - python

I'm looking to use urllib to search in the search box of https://bigfuture.collegeboard.org
Here's what I have, but it's just giving me the homepage HTML:
import requests
from urllib import urlopen
from urllib import urlencode
from bs4 import BeautifulSoup
url = "https://bigfuture.collegeboard.org"
data = urlencode({'q': 'financial analyst'})
results = requests.post(url, data)
soup = BeautifulSoup(results.content, 'html.parser').encode("ascii", "ignore")
output = open('text.txt','w')
output.write(soup)
How do I submit a query to the search box?

You need to include the /sitesearch endpoint in your url. If I search for "uconn", the URL that the site hits is:
https://bigfuture.collegeboard.org/sitesearch?q=uconn&searchType=bf_site&tp=bf_site
So all you need to do is change your url to:
url = "https://bigfuture.collegeboard.org/sitesearch"
Also make sure you're closing your file object, or use a with context manager.
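Putting that together, here is a minimal sketch (assuming the site accepts a plain GET with the q, searchType and tp parameters visible in the URL above; requests encodes them for you):
import requests
from bs4 import BeautifulSoup

url = "https://bigfuture.collegeboard.org/sitesearch"
params = {'q': 'financial analyst', 'searchType': 'bf_site', 'tp': 'bf_site'}
results = requests.get(url, params=params)  # requests URL-encodes the query string

soup = BeautifulSoup(results.content, 'html.parser')
# the with block closes the file for you
with open('text.txt', 'w', encoding='utf-8') as output:
    output.write(soup.prettify())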

Just pass the query parameter in the search URL directly, e.g.:
searches = ['test', 'new search']
for search in searches:
    search = search.replace(' ', '+')
    url = 'https://bigfuture.collegeboard.org/sitesearch?q=%s&searchType=bf_site&tp=bf_site' % (search)
    print url
    requests.get(url)

Related

How to scrape video URL from Webpage using python?

I want to download videos from a website.
Here is my code.
Every time I run this code, it returns an empty result.
Here is live code: https://colab.research.google.com/drive/19NDLYHI2n9rG6KeBCiv9vKXdwb5JL9Nb?usp=sharing
from bs4 import BeautifulSoup
import requests
url = requests.get("https://www.mxtakatak.com/xt0.3a7ed6f84ded3c0f678638602b48bb1b840bea7edb3700d62cebcf7a400d4279/video/20000kCCF0")
page = url.content
soup = BeautifulSoup(page, "html.parser")
#print(soup.prettify())
result = soup.find_all('video', class_="video-player")
print(result)
Using a regex:
import requests
import re
response = requests.get("....../video/20000kCCF0")
videoId = '20000kCCF0'
videos = re.findall(r'https://[^"]+' + videoId + '[^"]+mp4', response.text)
print(videos)
You always get a blank return because soup.find_all() doesn't find anything.
Maybe you should check the url.content you receive by hand and then decide what to look for with find_all()
EDIT: After digging around a bit, I found out how to get the content_url_orig:
from bs4 import BeautifulSoup
import requests
import json
url = requests.get("https://www.mxtakatak.com/xt0.3a7ed6f84ded3c0f678638602b48bb1b840bea7edb3700d62cebcf7a400d4279/video/20000kCCF0")
page = url.content
soup = BeautifulSoup(page, "html.parser")
result = str(soup.find_all('script')[1]) #looking for script tag inside the html-file
result = result.split('window._state = ')[1].split("</script>']")[0].split('\n')[0]
# separating the JSON from the whole script string; dug around in the file to find out how to do it
result = json.loads(result)
#navigating in the json to get the video-url
entity = list(result['entities'].items())[0][1]
download_url = entity['content_url_orig']
print(download_url)
Funny side note: if I read the JSON correctly, you can find download URLs for every video the creator uploaded :)
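For example, a small sketch building on the result dict parsed above (assuming every entry in result['entities'] carries a content_url_orig key, which may not hold for all entity types):
# list a download URL for each entity found in the parsed page state
for entity_id, entity in result['entities'].items():
    if 'content_url_orig' in entity:
        print(entity_id, entity['content_url_orig'])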

Trouble getting some content within a box in a webpage

I'm trying to parse the content within a box-like container located at the very bottom of this website, but I can't find it in the page source. I've written a script to try to reach it anyway.
import requests
from bs4 import BeautifulSoup
url = 'https://www.proxy-list.download/HTTPS'
r = requests.get(url)
soup = BeautifulSoup(r.text,'lxml')
item = soup.select_one("a#btn3").text
print(item)
Output I'm getting:
Copy to clipboard
I'm after this:
104.248.115.236:80
104.248.53.46:3128
104.236.248.219:3128
104.248.115.236:3128
104.248.115.236:8080
104.248.184.16:8080
Try this link https://www.proxy-list.download/api/v0/get?l=en&t=https (which you can find using dev tools) to get them all like the way I've shown below:
import requests
url = 'https://www.proxy-list.download/api/v0/get?l=en&t=https'
r = requests.get(url)
for item in r.json()[0]['LISTA']:
    proxy = f"{item['IP']}:{item['PORT']}"
    print(proxy)

Python HTML source code

I would like to write a script that picks out a specific piece of a page's source code and prints it.
import urllib.request
Webseite = "http://myip.is/"
html_code = urllib.request.urlopen(Webseite)
print(html_code.read().decode('ISO-8859-1'))
This is my current code.
I would like to print only the IP address that the website shows. The element I want is the one with title="copy ip address".
import requests
from bs4 import BeautifulSoup
s = requests.Session()
r = s.get('http://myip.is/')
soup = BeautifulSoup(r.text, "html5lib")
myIP = soup.find('a', {'title': 'copy ip address'}).text
print(myIP)
This uses the requests library (which you should always use for HTTP requests) to pull the page, feeds the content to BeautifulSoup, a very nice HTML parser, asks BeautifulSoup to find a single <a> tag with the attribute title set to 'copy ip address', and then saves the text component of that tag as myIP.
You could use jsonip, which returns a JSON object that you can easily parse with the standard library:
import json
from urllib2 import urlopen
my_ip = json.load(urlopen('http://jsonip.com'))['ip']
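That snippet is Python 2 (urllib2); a rough Python 3 equivalent using requests, assuming jsonip.com still returns a JSON body with an 'ip' field, would be:
import requests

# jsonip.com responds with JSON such as {"ip": "..."}; grab the 'ip' field
my_ip = requests.get('https://jsonip.com').json()['ip']
print(my_ip)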
You can use a regular expression to find the IP addresses:
import urllib.request
import re
Webseite = "http://myip.is/"
html_code = urllib.request.urlopen(Webseite)
content = html_code.read().decode('ISO-8859-1')
ip_regex = r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}'
ips_found = re.findall(ip_regex, content)
print(ips_found[0])

Python 2.7 - search for a particular URL on a webpage with ajax

I have to retrieve a URL that is nested in a webpage. I have tried the following code but it does not find the URL of the main link (to a PDF).
import urllib2
from bs4 import BeautifulSoup
url = "http://www.cmc.gv.ao/sites/main/pt/Paginas/genericFileList.aspx?mid=9&smid=69&FilterField1=TipoConteudo_x003A_Code&FilterValue1=ENTREG"
conn = urllib2.urlopen(url)
html = conn.read()
soup = BeautifulSoup(html)
links = soup.find_all('a')
for tag in links:
    link = tag.get('href', None)
    if link is not None:
        print link
The URL I would like to find is the main link on the web page:
http://www.cmc.gv.ao/sites/main/pt/Lists/CMC%20%20PublicaesFicheiros/Attachments/89/Lista%20de%20Institui%C3%A7%C3%B5es%20Registadas%20(actualizado%2024.10.16).pdf
The bs4 documentation says that the find_all() method looks through a tag's descendants (direct children, children of direct children, and so on) and retrieves all descendants that match your filters.
How do I get the URL from the webpage?
The PDF path is retrieved using an AJAX request, so you need to do a bit of work to mimic that request:
import urllib2
from bs4 import BeautifulSoup
import re
url = "http://www.cmc.gv.ao/sites/main/pt/Paginas/genericFileList.aspx?mid=9&smid=69&FilterField1=TipoConteudo_x003A_Code&FilterValue1=ENTREG"
conn = urllib2.urlopen(url)
html = conn.read()
# we need to pass in the getbyid value which we parse later
attach = "http://www.cmc.gv.ao/sites/main/pt/_api/web/lists/getbyid('{}')/items(89)/AttachmentFiles"
soup = BeautifulSoup(html)
# the getbyid value is contained inside a script tag; this will pull what we need from it.
patt = re.compile('ctx.editFormUrl\s+=\s+"(.*?)"')
# find that script.
scr = soup.find("script",text=re.compile("ctx.editFormUrl"))
# line we are getting looks like ctx.editFormUrl = "http://www.cmc.gv.ao/sites/main/pt/_layouts/15/listform.aspx?PageType=6&ListId=%7BC0527FB1%2D00D9%2D4BCF%2D8FFC%2DDFCAA9E9E51D%7D";
# we need the ListId
ctx = patt.search(scr.text).group(1)
# pull ListId, and pass it to url
soup2 = BeautifulSoup(urllib2.urlopen(attach.format(ctx.rsplit("=")[-1])).read())
# ^^ returns xml, we need to find the pdf path from that, it starts with /sites/main/pt/List.
pdf_path = soup2.find(text=re.compile("^/sites/main/pt/List"))
Then you need to join to the base url:
from urlparse import urljoin
# join our parsed path to the base
full_url = urljoin("http://www.cmc.gv.ao", pdf_path)
print(full_url)
we also need to quote and encode:
from urllib import quote
from urlparse import urljoin
# handle non-ascii and encode
full_url = urljoin("http://www.cmc.gv.ao", quote(pdf_path.encode("utf-8")))
And finally to write:
from urlparse import urljoin
from urllib import quote
full_url = urljoin("http://www.cmc.gv.ao", quote(pdf_path.encode("utf-8")))
from os.path import basename
with open(basename(pdf_path.encode("utf-8")), "wb") as f:
    f.writelines(urllib2.urlopen(full_url))
Which will give you a pdf file called Lista de Instituições Registadas (actualizado 24.10.16).pdf
If you use requests, it does a lot of the work for you:
import requests
from bs4 import BeautifulSoup
import re
from urlparse import urljoin
from os.path import basename
url = "http://www.cmc.gv.ao/sites/main/pt/Paginas/genericFileList.aspx?mid=9&smid=69&FilterField1=TipoConteudo_x003A_Code&FilterValue1=ENTREG"
conn = requests.get(url)
html = conn.content
attach = "http://www.cmc.gv.ao/sites/main/pt/_api/web/lists/getbyid('{}')/items(89)/AttachmentFiles"
soup = BeautifulSoup(html)
links = soup.find_all('a')
patt = re.compile('ctx.editFormUrl\s+=\s+"(.*?)"')
scr = soup.find("script",text=re.compile("ctx.editFormUrl"))
ctx = patt.search(scr.text).group(1)
soup2 = BeautifulSoup(requests.get(attach.format(ctx.rsplit("=")[-1])).content)
pdf_path = soup2.find(text=re.compile("/sites/main/pt/List"))
full_url = urljoin("http://www.cmc.gv.ao", pdf_path.encode("utf-8"))
with open(basename(pdf_path.encode("utf-8")), "wb") as f:
    f.writelines(requests.get(full_url))

retrieve links from web page using python and BeautifulSoup [closed]

How can I retrieve the links of a webpage and copy the url address of the links using Python?
Here's a short snippet using the SoupStrainer class in BeautifulSoup:
import httplib2
from bs4 import BeautifulSoup, SoupStrainer
http = httplib2.Http()
status, response = http.request('http://www.nytimes.com')
for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
    if link.has_attr('href'):
        print(link['href'])
The BeautifulSoup documentation is actually quite good, and covers a number of typical scenarios:
https://www.crummy.com/software/BeautifulSoup/bs4/doc/
Edit: Note that I used the SoupStrainer class because it's a bit more efficient (memory- and speed-wise) if you know what you're parsing in advance.
For completeness' sake, here is the BeautifulSoup 4 version, also making use of the encoding supplied by the server:
from bs4 import BeautifulSoup
import urllib.request
parser = 'html.parser' # or 'lxml' (preferred) or 'html5lib', if installed
resp = urllib.request.urlopen("http://www.gpsbasecamp.com/national-parks")
soup = BeautifulSoup(resp, parser, from_encoding=resp.info().get_param('charset'))
for link in soup.find_all('a', href=True):
    print(link['href'])
or the Python 2 version:
from bs4 import BeautifulSoup
import urllib2
parser = 'html.parser' # or 'lxml' (preferred) or 'html5lib', if installed
resp = urllib2.urlopen("http://www.gpsbasecamp.com/national-parks")
soup = BeautifulSoup(resp, parser, from_encoding=resp.info().getparam('charset'))
for link in soup.find_all('a', href=True):
    print link['href']
and a version using the requests library, which as written will work in both Python 2 and 3:
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
import requests
parser = 'html.parser' # or 'lxml' (preferred) or 'html5lib', if installed
resp = requests.get("http://www.gpsbasecamp.com/national-parks")
http_encoding = resp.encoding if 'charset' in resp.headers.get('content-type', '').lower() else None
html_encoding = EncodingDetector.find_declared_encoding(resp.content, is_html=True)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(resp.content, parser, from_encoding=encoding)
for link in soup.find_all('a', href=True):
    print(link['href'])
The soup.find_all('a', href=True) call finds all <a> elements that have an href attribute; elements without the attribute are skipped.
BeautifulSoup 3 stopped development in March 2012; new projects really should use BeautifulSoup 4, always.
Note that you should leave decoding the HTML from bytes to BeautifulSoup. You can inform BeautifulSoup of the character set found in the HTTP response headers to assist in decoding, but this can be wrong and conflict with the <meta> header info found in the HTML itself, which is why the above uses the BeautifulSoup internal class method EncodingDetector.find_declared_encoding() to make sure that such embedded encoding hints win over a misconfigured server.
With requests, the response.encoding attribute defaults to Latin-1 if the response has a text/* mimetype, even if no character set was returned. This is consistent with the HTTP RFCs but painful when used with HTML parsing, so you should ignore that attribute when no charset is set in the Content-Type header.
Others have recommended BeautifulSoup, but it's much better to use lxml. Despite its name, it is also for parsing and scraping HTML. It's much, much faster than BeautifulSoup, and it even handles "broken" HTML better than BeautifulSoup (their claim to fame). It has a compatibility API for BeautifulSoup too if you don't want to learn the lxml API.
Ian Bicking agrees.
There's no reason to use BeautifulSoup anymore, unless you're on Google App Engine or something where anything not purely Python isn't allowed.
lxml.html also supports CSS3 selectors so this sort of thing is trivial.
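A rough sketch of that CSS-selector route (assuming the cssselect package is installed, which lxml's cssselect() method depends on):
import requests
import lxml.html

dom = lxml.html.fromstring(requests.get('http://www.nytimes.com').content)
# 'a[href]' is a CSS selector: every <a> element that has an href attribute
for a in dom.cssselect('a[href]'):
    print(a.get('href'))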
An example with lxml and xpath would look like this:
import urllib
import lxml.html
connection = urllib.urlopen('http://www.nytimes.com')
dom = lxml.html.fromstring(connection.read())
for link in dom.xpath('//a/@href'): # select the url in href for all a tags (links)
    print link
import urllib2
import BeautifulSoup
request = urllib2.Request("http://www.gpsbasecamp.com/national-parks")
response = urllib2.urlopen(request)
soup = BeautifulSoup.BeautifulSoup(response)
for a in soup.findAll('a'):
    if 'national-park' in a['href']:
        print 'found a url with national-park in the link'
The following code is to retrieve all the links available in a webpage using urllib2 and BeautifulSoup4:
import urllib2
from bs4 import BeautifulSoup
url = urllib2.urlopen("http://www.espncricinfo.com/").read()
soup = BeautifulSoup(url)
for line in soup.find_all('a'):
    print(line.get('href'))
Links can be within a variety of attributes so you could pass a list of those attributes to select.
For example, with src and href attributes (here I am using the starts with ^ operator to specify that either of these attributes values starts with http):
from bs4 import BeautifulSoup as bs
import requests
r = requests.get('https://stackoverflow.com/')
soup = bs(r.content, 'lxml')
links = [item['href'] if item.get('href') is not None else item['src'] for item in soup.select('[href^="http"], [src^="http"]') ]
print(links)
Attribute = value selectors
[attr^=value]
Represents elements with an attribute name of attr whose value is prefixed (preceded) by value.
There are also the commonly used $ (ends with) and * (contains) operators. For a full syntax list see the link above.
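For instance, a quick sketch of the $ and * operators with BeautifulSoup's select() (the URL and the patterns here are just placeholders):
from bs4 import BeautifulSoup as bs
import requests

r = requests.get('https://stackoverflow.com/')
soup = bs(r.content, 'lxml')
# href ends with ".pdf"
pdf_links = [a['href'] for a in soup.select('a[href$=".pdf"]')]
# href contains "stackoverflow"
internal = [a['href'] for a in soup.select('a[href*="stackoverflow"]')]
print(pdf_links)
print(internal)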
Under the hood, BeautifulSoup can use lxml. Requests, lxml and list comprehensions make a killer combo.
import requests
import lxml.html
dom = lxml.html.fromstring(requests.get('http://www.nytimes.com').content)
[x for x in dom.xpath('//a/@href') if '//' in x and 'nytimes.com' not in x]
In the list comprehension, the "if '//' in x and 'nytimes.com' not in x" condition is a simple way to scrub the URL list of the site's 'internal' navigation URLs, etc.
Just for getting the links, without BeautifulSoup or regex:
import urllib2
url="http://www.somewhere.com"
page=urllib2.urlopen(url)
data=page.read().split("</a>")
tag="<a href=\""
endtag="\">"
for item in data:
    if "<a href" in item:
        try:
            ind = item.index(tag)
            item = item[ind+len(tag):]
            end = item.index(endtag)
        except: pass
        else:
            print item[:end]
For more complex operations, of course, BeautifulSoup is still preferred.
This script does what you're looking for, but also resolves the relative links to absolute links.
import urllib
import lxml.html
import urlparse
def get_dom(url):
    connection = urllib.urlopen(url)
    return lxml.html.fromstring(connection.read())

def get_links(url):
    return resolve_links((link for link in get_dom(url).xpath('//a/@href')))

def guess_root(links):
    for link in links:
        if link.startswith('http'):
            parsed_link = urlparse.urlparse(link)
            scheme = parsed_link.scheme + '://'
            netloc = parsed_link.netloc
            return scheme + netloc

def resolve_links(links):
    root = guess_root(links)
    for link in links:
        if not link.startswith('http'):
            link = urlparse.urljoin(root, link)
        yield link

for link in get_links('http://www.google.com'):
    print link
To find all the links, in this example we will use the urllib2 module together with the re module. One of the most powerful functions in the re module is re.findall(). While re.search() is used to find the first match for a pattern, re.findall() finds all the matches and returns them as a list of strings, with each string representing one match.
import urllib2
import re
url = "http://www.somewhere.com"  # placeholder: set this to the page you want to scan
#connect to a URL
website = urllib2.urlopen(url)
#read html code
html = website.read()
#use re.findall to get all the links
links = re.findall('"((http|ftp)s?://.*?)"', html)
print links
Why not use regular expressions:
import urllib2
import re
url = "http://www.somewhere.com"
page = urllib2.urlopen(url)
page = page.read()
links = re.findall(r"<a.*?\s*href=\"(.*?)\".*?>(.*?)</a>", page)
for link in links:
    print('href: %s, HTML text: %s' % (link[0], link[1]))
Here's an example using @ars's accepted answer and the BeautifulSoup4, requests, and wget modules to handle the downloads.
import requests
import wget
import os
from bs4 import BeautifulSoup, SoupStrainer
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/eeg-mld/eeg_full/'
file_type = '.tar.gz'
response = requests.get(url)
for link in BeautifulSoup(response.content, 'html.parser', parse_only=SoupStrainer('a')):
    if link.has_attr('href'):
        if file_type in link['href']:
            full_path = url + link['href']
            wget.download(full_path)
I found the answer by @Blairg23 working, after the following correction (covering the scenario where it failed to work correctly):
for link in BeautifulSoup(response.content, 'html.parser', parse_only=SoupStrainer('a')):
    if link.has_attr('href'):
        if file_type in link['href']:
            full_path = urlparse.urljoin(url, link['href'])  # the urlparse module needs to be imported
            wget.download(full_path)
For Python 3, urllib.parse.urljoin has to be used in order to obtain the full URL instead, as sketched below.
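For example, a minimal Python 3 sketch of the same correction (same URL and file type as above; the wget and beautifulsoup4 packages must be installed):
import requests
import wget
from urllib.parse import urljoin
from bs4 import BeautifulSoup, SoupStrainer

url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/eeg-mld/eeg_full/'
file_type = '.tar.gz'

response = requests.get(url)
for link in BeautifulSoup(response.content, 'html.parser', parse_only=SoupStrainer('a')):
    if link.has_attr('href') and file_type in link['href']:
        # urljoin resolves relative hrefs against the page URL
        wget.download(urljoin(url, link['href']))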
BeautifulSoup's own parser can be slow. It might be more feasible to use lxml, which is capable of parsing directly from a URL (with some limitations mentioned below).
import lxml.html
doc = lxml.html.parse(url)
links = doc.xpath('//a[@href]')
for link in links:
    print link.attrib['href']
The code above will return the links as is, and in most cases they will be relative links or absolute links from the site root. Since my use case was to only extract a certain type of link, below is a version that converts the links to full URLs and optionally accepts a glob pattern like *.mp3. It won't handle single and double dots in the relative paths, though; so far I haven't needed that. If you need to parse URL fragments containing ../ or ./ then urlparse.urljoin might come in handy.
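For instance, a quick illustration of how urljoin resolves dotted relative paths (Python 2 urlparse here, matching the script below):
from urlparse import urljoin

# '..' steps up one directory relative to the base document
print urljoin('http://example.com/a/b/page.html', '../c.html')  # http://example.com/a/c.html
print urljoin('http://example.com/a/b/page.html', './d.html')   # http://example.com/a/b/d.html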
NOTE: Direct lxml url parsing doesn't handle loading from https and doesn't do redirects, so for this reason the version below is using urllib2 + lxml.
#!/usr/bin/env python
import sys
import urllib2
import urlparse
import lxml.html
import fnmatch

try:
    import urltools as urltools
except ImportError:
    sys.stderr.write('To normalize URLs run: `pip install urltools --user`')
    urltools = None

def get_host(url):
    p = urlparse.urlparse(url)
    return "{}://{}".format(p.scheme, p.netloc)

if __name__ == '__main__':
    url = sys.argv[1]
    host = get_host(url)
    glob_patt = len(sys.argv) > 2 and sys.argv[2] or '*'

    doc = lxml.html.parse(urllib2.urlopen(url))
    links = doc.xpath('//a[@href]')
    for link in links:
        href = link.attrib['href']
        if fnmatch.fnmatch(href, glob_patt):
            if not href.startswith(('http://', 'https://', 'ftp://')):
                if href.startswith('/'):
                    href = host + href
                else:
                    parent_url = url.rsplit('/', 1)[0]
                    href = urlparse.urljoin(parent_url, href)
            if urltools:
                href = urltools.normalize(href)
            print href
The usage is as follows:
getlinks.py http://stackoverflow.com/a/37758066/191246
getlinks.py http://stackoverflow.com/a/37758066/191246 "*users*"
getlinks.py http://fakedomain.mu/somepage.html "*.mp3"
There can be many duplicate links together with both external and internal links. To differentiate between the two and just get unique links using sets:
# Python 3.
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup
url = "http://www.espncricinfo.com/"
resp = urllib.request.urlopen(url)
# Get server encoding per recommendation of Martijn Pieters.
soup = BeautifulSoup(resp, from_encoding=resp.info().get_param('charset'))
external_links = set()
internal_links = set()
for line in soup.find_all('a'):
    link = line.get('href')
    if not link:
        continue
    if link.startswith('http'):
        external_links.add(link)
    else:
        internal_links.add(link)
# Depending on usage, full internal links may be preferred.
full_internal_links = {
    urllib.parse.urljoin(url, internal_link)
    for internal_link in internal_links
}
# Print all unique external and full internal links.
for link in external_links.union(full_internal_links):
    print(link)
import urllib2
from bs4 import BeautifulSoup
a=urllib2.urlopen('http://dir.yahoo.com')
code=a.read()
soup=BeautifulSoup(code)
links=soup.findAll("a")
#To get href part alone
print links[0].attrs['href']
