xml.parsers.expat.ExpatError: not well-formed (invalid token) python3? - python

I have this code running under Python 3 for E2 (Dreambox):
from xml.dom import Node, minidom
from urllib.request import urlopen, Request

# URL of the XML panel list to download and parse.
selectedserverurl = 'http://fairbird.liveblog365.com/TSpanel/TSipanel.xml'


def downloadxmlpage():
    """Download the document at ``selectedserverurl`` and parse it.

    NOTE(review): this server may answer with an HTML/JavaScript
    anti-scraping challenge page instead of the XML file, in which case
    ``minidom.parseString`` raises ``ExpatError`` — confirm with the
    server operator before relying on this endpoint.
    """
    req = Request(selectedserverurl)
    response = urlopen(req)
    try:
        data = response.read()
    finally:
        # Close the socket even if read() raises.
        response.close()
    print("data:", data)
    # BUG FIX: the original called gotPageLoad(data) twice (the second
    # time only to print its return value, which was always None),
    # re-parsing the document needlessly.  Parse once and reuse.
    items = gotPageLoad(data)
    print("gotPageLoad(data):", items)


def gotPageLoad(data=None):
    """Parse *data* as XML and return the plugin names found.

    Collects the ``name`` attribute of every <plugin> element under a
    <plugins> element whose ``cont`` attribute contains 'TSpanel'.
    Returns the list of names (possibly empty), or None when *data* is
    None.  Returning the names is a backward-compatible generalization:
    the original always returned None and only printed.
    """
    if data is None:
        return None
    names = []
    xmlparse = minidom.parseString(data)
    for plugins in xmlparse.getElementsByTagName('plugins'):
        item = plugins.getAttribute('cont')
        if 'TSpanel' in item:
            for plugin in plugins.getElementsByTagName('plugin'):
                tsitem = plugin.getAttribute('name')
                print("tsitem:", tsitem)
                names.append(tsitem)
    return names


if __name__ == '__main__':
    # Guard the network call so importing this module has no side effects.
    downloadxmlpage()
I have tried to read this file and extract the content from it:
http://fairbird.liveblog365.com/TSpanel/TSipanel.xml
But I have got this error !!
data: b'<html><body><script type="text/javascript" src="/aes.js" ></script><script>function toNumbers(d){var e=[];d.replace(/(..)/g,function(d){e.push(parseInt(d,16))});return e}function toHex(){for(var d=[],d=1==arguments.length&&arguments[0].constructor==Array?arguments[0]:arguments,e="",f=0;f<d.length;f++)e+=(16>d[f]?"0":"")+d[f].toString(16);return e.toLowerCase()}var a=toNumbers("f655ba9d09a112d4968c63579db590b4"),b=toNumbers("98344c2eee86c3994890592585b49f80"),c=toNumbers("55cc7e99e3f798b6063f25e8b0f8aa76");document.cookie="__test="+toHex(slowAES.decrypt(c,2,a,b))+"; expires=Thu, 31-Dec-37 23:55:55 GMT; path=/"; location.href="http://fairbird.liveblog365.com/TSpanel/TSipanel.xml?i=1";</script><noscript>This site requires Javascript to work, please enable Javascript in your browser or use a browser with Javascript support</noscript></body></html>'
Traceback (most recent call last):
File "/home/raed/Desktop/test.py", line 24, in <module>
downloadxmlpage()
File "/home/raed/Desktop/test.py", line 11, in downloadxmlpage
gotPageLoad(data)
File "/home/raed/Desktop/test.py", line 16, in gotPageLoad
xmlparse = minidom.parseString(data)
File "/usr/lib/python3.10/xml/dom/minidom.py", line 2000, in parseString
return expatbuilder.parseString(string)
File "/usr/lib/python3.10/xml/dom/expatbuilder.py", line 925, in parseString
return builder.parseString(string)
File "/usr/lib/python3.10/xml/dom/expatbuilder.py", line 223, in parseString
parser.Parse(string, True)
xml.parsers.expat.ExpatError: not well-formed (invalid token): line 1, column 222
So how can I solve this issue?

Your data output is HTML, not an XML file, therefore the parser is failing.
The HTML redirects to http://fairbird.liveblog365.com/TSpanel/TSipanel.xml?i=1 using Javascript, as shown - This site requires Javascript to work.
This is typically done to prevent anyone from scraping the page/server-files.

Related

Beautiful Soup Traceback on First Attempt

Hello, I'm new to Python and Beautiful Soup. I have downloaded BS4 with pip install and am attempting to do some web scraping. I have looked through a lot of help guides and haven't been able to get my BeautifulSoup() to work through the cmd compiler. Here is my code:
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl

# Build an SSL context that skips certificate verification so pages
# with self-signed or broken certificates can still be fetched.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Fetch the page the user asks for and parse it with the stdlib parser.
url = input('Enter - ')
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')

# Print the href attribute (or None) of every anchor tag on the page.
for tag in soup('a'):
    print(tag.get('href', None))
This is the traceback I get with an URL input:
C:\Users\aaron\OneDrive\Desktop\Coding>python urllinks_get.py
Enter - http://www.dr-chuck.com/page1.htm
Traceback (most recent call last):
File "C:\Users\aaron\OneDrive\Desktop\Coding\urllinks_get.py", line 21, in <module>
soup = BeautifulSoup(html, 'html.parser')
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\__init__.py", line 215, in __init__
self._feed()
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\__init__.py", line 239, in _feed
self.builder.feed(self.markup)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\builder\_htmlparser.py", line 164, in feed
parser.feed(markup)
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2032.0_x64__qbz5n2kfra8p0\lib\html\parser.py", line 110, in feed
self.goahead(0)
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2032.0_x64__qbz5n2kfra8p0\lib\html\parser.py", line 170, in goahead
k = self.parse_starttag(i)
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2032.0_x64__qbz5n2kfra8p0\lib\html\parser.py", line 344, in parse_starttag
self.handle_starttag(tag, attrs)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\builder\_htmlparser.py", line 62, in handle_starttag
self.soup.handle_starttag(name, None, None, attr_dict)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\__init__.py", line 404, in handle_starttag
self.currentTag, self._most_recent_element)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\element.py", line 1001, in __getattr__
return self.find(tag)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\element.py", line 1238, in find
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\element.py", line 1259, in find_all
return self._find_all(name, attrs, text, limit, generator, **kwargs)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\element.py", line 516, in _find_all
strainer = SoupStrainer(name, attrs, text, **kwargs)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\element.py", line 1560, in __init__
self.text = self._normalize_search_value(text)
File "C:\Users\aaron\OneDrive\Desktop\Coding\bs4\element.py", line 1565, in _normalize_search_value
if (isinstance(value, str) or isinstance(value, collections.Callable) or hasattr(value, 'match')
AttributeError: module 'collections' has no attribute 'Callable'
Really would like to continue my online classes so any help would be much appreciated!
Thanks!
Found my issue. I had installed beautifulsoup4 as well as used the bs4 folder in the same directory as my program ran in. I didn't realize they would interfere with one another. As soon as I removed the bs4 folder from the directory my program ran fine :)

BeautifulSoup timing out on instantiation?

I'm just doing some web scraping with BeautifulSoup and I'm running into a weird error. Code:
# Python 2 snippet: fetch the "about" page of `link` (defined elsewhere
# in the caller) with a 5-second socket timeout.
print "Running urllib2"
# NOTE(review): urlopen returns a file-like response object; at this
# point only the headers have arrived — the body has NOT been read yet.
g = urllib2.urlopen(link + "about", timeout=5)
print "Finished urllib2"
# BeautifulSoup calls g.read() internally (bs4 __init__ does
# `markup = markup.read()`), so a socket timeout raised on this line is
# really the body download timing out, not the parser.
about_soup = BeautifulSoup(g, 'lxml')
Here's the output:
Running urllib2
Finished urllib2
Error
Traceback (most recent call last):
File "/Users/pspieker/Documents/projects/ThePyStrikesBack/tests/TestSpringerOpenScraper.py", line 10, in test_strip_chars
for row in self.instance.get_entries():
File "/Users/pspieker/Documents/projects/ThePyStrikesBack/src/JournalScrapers.py", line 304, in get_entries
about_soup = BeautifulSoup(g, 'lxml')
File "/Users/pspieker/.virtualenvs/thepystrikesback/lib/python2.7/site-packages/bs4/__init__.py", line 175, in __init__
markup = markup.read()
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/socket.py", line 355, in read
data = self._sock.recv(rbufsize)
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/httplib.py", line 588, in read
return self._read_chunked(amt)
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/httplib.py", line 648, in _read_chunked
value.append(self._safe_read(amt))
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/httplib.py", line 703, in _safe_read
chunk = self.fp.read(min(amt, MAXAMOUNT))
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/socket.py", line 384, in read
data = self._sock.recv(left)
timeout: timed out
I understand that the urllib2.urlopen could be causing problems, but the exception occurs in the line instantiating BeautifulSoup. I did some googling but couldn't find anything about BeautifulSoup timeout issues.
Any ideas on what is happening?
It is the urllib2 part that is causing the timeout.
The reason you see it is failing on the BeautifulSoup instantiation line is that g, the file-like object, is being read by BeautifulSoup internally. This is the part of the stacktrace proving that:
File "/Users/pspieker/.virtualenvs/thepystrikesback/lib/python2.7/site-packages/bs4/__init__.py", line 175, in __init__
markup = markup.read()

python script to scan a pdf file using online scanner

I used this script to scan multiple PDF files contained in a folder with the online scanner "https://wepawet.iseclab.org/".
import mechanize
import re
import os


def upload_file(uploaded_file):
    """Upload one PDF from the local ``200`` directory to the scanner.

    The HTML result page returned by the server is appended to
    ``200_clean.html``.
    """
    url = "https://wepawet.iseclab.org/"
    br = mechanize.Browser()
    br.set_handle_robots(False)  # ignore robots
    br.open(url)
    br.select_form(nr=0)
    path = os.path.join("200", uploaded_file)
    # BUG FIX: the MIME type must be application/pdf, not text/plain —
    # the wrong type made the server answer with a redirect loop
    # ("HTTP Error refresh").  Also open the file in binary mode, since
    # PDF is binary data.
    br.form.add_file(open(path, 'rb'), 'application/pdf', path)
    br.form.set_all_readonly(False)
    res = br.submit()
    content = res.read()
    # Use a distinct name for the output handle — the original reused
    # `f`, shadowing the upload path variable.
    with open("200_clean.html", "a") as out:
        out.write(content)


def main():
    # Upload every file found in the local "200" directory.
    for name in os.listdir("200"):
        upload_file(name)


if __name__ == '__main__':
    main()
but after the execution of the code I got the following error:
Traceback (most recent call last):
File "test.py", line 56, in <module>
main()
File "test.py", line 50, in main
upload_file(file)
File "test.py", line 40, in upload_file
res = br.submit()
File "/home/suleiman/Desktop/mechanize/_mechanize.py", line 541, in submit
return self.open(self.click(*args, **kwds))
File "/home/suleiman/Desktop/mechanize/_mechanize.py", line 203, in open
return self._mech_open(url, data, timeout=timeout)
File "/home/suleiman/Desktop/mechanize/_mechanize.py", line 255, in _mech_open
raise response
mechanize._response.httperror_seek_wrapper: HTTP Error refresh: The HTTP server returned a redirect error that would lead to an infinite loop.
The last 30x error message was:
OK
Could anyone help me with this problem?
I think the issue is the mime-type text/plain you set. For PDF, this should be application/pdf. Your code with this change worked for me when I uploaded a sample PDF.
Change the br.form.add_file call to look like this:
br.form.add_file(open(f), 'application/pdf', f)

requests: TypeError: 'tuple' object is not callable in python 3.1.2

I'm making a web page scraper using BeautifulSoup4 and requests libraries. I had some trouble with BeautifulSoup working but got some help and was able to get that fixed. Now I've run into a new problem and I'm not sure how to fix it. I'm using requests 2.2.1 and I'm trying to run this program on Python 3.1.2. And when I do I get a traceback error.
here is my code:
from bs4 import BeautifulSoup
import requests

# Ask the user for a host name and prepend the scheme to form a URL.
url = input("Enter a URL (start with www): ")
link = "http://" + url

# Download the page body and hand it to BeautifulSoup.
page = requests.get(link).content
soup = BeautifulSoup(page)

# Print every anchor's href, separated by blank lines.
for anchor in soup.find_all('a'):
    print(anchor.get('href'))
    print()
and the error:
Enter a URL (start with www): www.google.com
Traceback (most recent call last):
File "/Users/user/Desktop/project.py", line 8, in <module>
page = requests.get(link).content
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/api.py", line 55, in get
return request('get', url, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/api.py", line 44, in request
return session.request(method=method, url=url, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/sessions.py", line 349, in request
prep = self.prepare_request(req)
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/sessions.py", line 287, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/models.py", line 287, in prepare
self.prepare_url(url,params)
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/models.py", line 321, in prepare_url
url = str(url)
TypeError: 'tuple' object is not callable
I've done some looking and when others have gotten this error (in django mostly) there was a comma missing but I'm not sure where to put a comma at? Any help will be appreciated.

What is the best way to handle a bad link given to BeautifulSoup?

I'm working on something that pulls in urls from delicious and then uses those urls to discover associated feeds.
However, some of the bookmarks in delicious are not html links and cause BS to barf. Basically, I want to throw away a link if BS fetches it and it does not look like html.
Right now, this is what I'm getting.
trillian:Documents jauderho$ ./d2o.py "green data center"
processing http://www.greenm3.com/
processing http://www.eweek.com/c/a/Green-IT/How-to-Create-an-EnergyEfficient-Green-Data-Center/?kc=rss
Traceback (most recent call last):
File "./d2o.py", line 53, in <module>
get_feed_links(d_links)
File "./d2o.py", line 43, in get_feed_links
soup = BeautifulSoup(html)
File "/Library/Python/2.5/site-packages/BeautifulSoup.py", line 1499, in __init__
BeautifulStoneSoup.__init__(self, *args, **kwargs)
File "/Library/Python/2.5/site-packages/BeautifulSoup.py", line 1230, in __init__
self._feed(isHTML=isHTML)
File "/Library/Python/2.5/site-packages/BeautifulSoup.py", line 1263, in _feed
self.builder.feed(markup)
File "/System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/HTMLParser.py", line 108, in feed
self.goahead(0)
File "/System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/HTMLParser.py", line 150, in goahead
k = self.parse_endtag(i)
File "/System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/HTMLParser.py", line 314, in parse_endtag
self.error("bad end tag: %r" % (rawdata[i:j],))
File "/System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/HTMLParser.py", line 115, in error
raise HTMLParseError(message, self.getpos())
HTMLParser.HTMLParseError: bad end tag: u'</b />', at line 739, column 1
Update:
Jehiah's answer does the trick. For reference, here's some code to get the content type:
def check_for_html(link):
    """Fetch *link* and return the Content-Type header of the response.

    Python 2 code: relies on ``urllib.urlopen`` and the httplib message
    object's ``getheader`` method.
    """
    response = urllib.urlopen(link)
    headers = response.info()
    return headers.getheader('Content-Type')
I simply wrap my BeautifulSoup processing and look for the HTMLParser.HTMLParseError exception
import HTMLParser,BeautifulSoup
try:
soup = BeautifulSoup.BeautifulSoup(raw_html)
for a in soup.findAll('a'):
href = a.['href']
....
except HTMLParser.HTMLParseError:
print "failed to parse",url
but further than that, you can check the content type of the responses when you crawl a page and make sure that it's something like text/html or application/xml+xhtml or something like that before you even try to parse it. That should head off most errors.

Categories