How to load user CSS in a WebKit WebView using PyObjC? - python

I'd like to have a small browser that uses my own CSS.
The problem is that the CSS is not loaded, or, I guess, it loads but has no effect.
Here is the full code (I don't use Interface Builder):
import Foundation
import WebKit
import AppKit
import objc
def main():
    app = AppKit.NSApplication.sharedApplication()
    rect = Foundation.NSMakeRect(100, 350, 600, 800)
    win = AppKit.NSWindow.alloc()
    win.initWithContentRect_styleMask_backing_defer_(
        rect,
        AppKit.NSTitledWindowMask | AppKit.NSClosableWindowMask |
        AppKit.NSResizableWindowMask | AppKit.NSMiniaturizableWindowMask,
        AppKit.NSBackingStoreBuffered,
        False)
    win.display()
    win.orderFrontRegardless()
    webview = WebKit.WebView.alloc()
    webview.initWithFrame_(rect)
    webview.preferences().setUserStyleSheetEnabled_(objc.YES)
    print webview.preferences().userStyleSheetEnabled()
    cssurl = Foundation.NSURL.URLWithString_("http://dev.stanpol.ru/user.css")
    webview.preferences().setUserStyleSheetLocation_(cssurl)
    print webview.preferences().userStyleSheetLocation()
    pageurl = Foundation.NSURL.URLWithString_("http://dev.stanpol.ru/index.html")
    req = Foundation.NSURLRequest.requestWithURL_(pageurl)
    webview.mainFrame().loadRequest_(req)
    win.setContentView_(webview)
    app.run()

if __name__ == '__main__':
    main()
The code runs without errors and prints
True
http://dev.stanpol.ru/user.css
but my CSS has no effect in the WebView.
I tried different solutions like adding a link to the DOM:
pageurl = Foundation.NSURL.URLWithString_("http://dev.stanpol.ru/index.html")
req = Foundation.NSURLRequest.requestWithURL_(pageurl)
webview.mainFrame().loadRequest_(req)
dom = webview.mainFrame().DOMDocument()
link = dom.createElement_("link")
link.setAttribute_value_("rel", "StyleSheet")
link.setAttribute_value_("type", "text/css")
link.setAttribute_value_("href", "http://dev.stanpol.ru/user.css")
head = dom.getElementsByTagName_(u"head")
hf = head.item_(0)
hf.appendChild_(link)
but neither approach works.
I don't want to use javascript to load CSS.
Can anybody tell me what's wrong with setting user CSS in my code?

Apple doesn't properly document that setUserStyleSheetLocation can only be a local file path or a data: URL. Qt does explain this in the documentation of the corresponding setting in QtWebKit:
http://doc.qt.nokia.com/latest/qwebsettings.html#setUserStyleSheetUrl
Specifies the location of a user stylesheet to load with every web page.
The location must be either a path on the local filesystem, or a data URL with UTF-8 and Base64 encoded data, such as:
"data:text/css;charset=utf-8;base64,cCB7IGJhY2tncm91bmQtY29sb3I6IHJlZCB9Ow=="
Note: If the base64 data is not valid, the style will not be applied.
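A minimal PyObjC sketch of that workaround, assuming a local copy of the stylesheet named user.css sits next to the script; it inlines the CSS as a Base64 data: URL, which WebKit accepts:
import base64

css = open("user.css").read()  # assumes a local copy of the stylesheet
data_url = "data:text/css;charset=utf-8;base64," + base64.b64encode(css)
cssurl = Foundation.NSURL.URLWithString_(data_url)
webview.preferences().setUserStyleSheetLocation_(cssurl)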

Related

Why is my self-hosted Flet web app stuck at 'please wait until the application is being started'?

my python code:
import flet as ft
import os

# set Flet path to an empty string to serve at the root URL (e.g., https://lizards.ai/)
# or a folder/path to serve beneath the root (e.g., https://lizards.ai/ui/path)
DEFAULT_FLET_PATH = 'http:/ip/'  # or 'ui/path'
DEFAULT_FLET_PORT = 8502

def main(page: ft.Page):
    page.title = "You Enjoy Mychatbot"
    page.add(ft.Text("Reba put a stopper in the bottom of the tub"))

if __name__ == "__main__":
    flet_path = os.getenv("FLET_PATH", DEFAULT_FLET_PATH)
    flet_port = int(os.getenv("FLET_PORT", DEFAULT_FLET_PORT))
    ft.app(name=flet_path, target=main, view=None, port=flet_port)
When I load the web app after showing the Flet icon, it gets stuck on "Please wait for the app to start"
The flet_path provided as the name argument is meant to be a path beneath the root, bearing in mind that flet's default URL strategy uses a hash as root. So, for example, if you provide test/path as the DEFAULT_FLET_PATH in your code above, then run the code locally, it should load at http://localhost:8502/test/path/#/
You've provided http:/ip/ as a path, but I think that is likely generating an error that gets swallowed, since a colon is invalid as part of a URL path. A sketch of the fix follows.
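A minimal sketch of the corrected setup, with 'ui/path' as a stand-in for whatever path segment you actually want (note: a plain path, no scheme and no colon):
import flet as ft

DEFAULT_FLET_PATH = 'ui/path'  # a path beneath the root, not a URL
DEFAULT_FLET_PORT = 8502

def main(page: ft.Page):
    page.title = "You Enjoy Mychatbot"
    page.add(ft.Text("Reba put a stopper in the bottom of the tub"))

if __name__ == "__main__":
    ft.app(name=DEFAULT_FLET_PATH, target=main, view=None, port=DEFAULT_FLET_PORT)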

Python script for "Google search by image"

I have checked Google Search API's and it seems that they have not released any API for searching "Images". So, I was wondering if there exists a python script/library through which I can automate the "search by image feature".
This was annoying enough to figure out that I thought I'd throw a comment on the first Python-related Stack Overflow result for "script google image search". The most annoying part of all this is setting up your application and custom search engine (CSE) properly in Google's web UI, but once you have your API key and CSE, define them in your environment and do something like:
#!/usr/bin/env python
# save top 10 google image search results to current directory
# https://developers.google.com/custom-search/json-api/v1/using_rest
import requests
import os
import sys
import re
import shutil
url = 'https://www.googleapis.com/customsearch/v1?key={}&cx={}&searchType=image&q={}'
apiKey = os.environ['GOOGLE_IMAGE_APIKEY']
cx = os.environ['GOOGLE_CSE_ID']
q = sys.argv[1]
i = 1
for result in requests.get(url.format(apiKey, cx, q)).json()['items']:
    link = result['link']
    image = requests.get(link, stream=True)
    if image.status_code == 200:
        m = re.search(r'[^\.]+$', link)
        filename = './{}-{}.{}'.format(q, i, m.group())
        with open(filename, 'wb') as f:
            image.raw.decode_content = True
            shutil.copyfileobj(image.raw, f)
        i += 1
There is no API available, but you can parse the page and imitate the browser. I don't know how much data you need to parse, though, because Google may limit or block access.
You can imitate the browser by simply using urllib and setting the correct headers. If you think parsing complex web pages from Python would be difficult, you can instead use a headless browser like PhantomJS; inside a browser it is trivial to get the right elements using JavaScript/DOM. A minimal sketch of the urllib approach follows.
Note: before trying any of this, check Google's TOS.
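As a minimal sketch of the header-spoofing idea above (Python 2, matching the era of this answer; the User-Agent string is only an example, and the URL is a placeholder for whatever page you intend to parse):
import urllib2

# pretend to be a regular browser via the User-Agent header
req = urllib2.Request("http://www.google.com/",
                      headers={"User-Agent": "Mozilla/5.0"})
html = urllib2.urlopen(req).read()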
You can try this:
https://developers.google.com/image-search/v1/jsondevguide#json_snippets_python
It's deprecated, but seems to work.
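For illustration, a hedged Python 2 sketch against the deprecated AJAX image-search endpoint that the linked guide documents; since it is deprecated, it may stop working at any time:
import json
import urllib

query = urllib.quote_plus("fuzzy monkey")
url = ("https://ajax.googleapis.com/ajax/services/search/images"
       "?v=1.0&q=" + query)
results = json.load(urllib.urlopen(url))
# the guide documents results under responseData.results
for item in results["responseData"]["results"]:
    print item["unescapedUrl"]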

How to display a page in my browser with python code that is run locally on my computer with "GAE" SDK?

When I run this code on my computer with the help of "Google App Engine SDK", it displays (in my browser) the HTML code of the Google home page:
from google.appengine.api import urlfetch
url = "http://www.google.com/"
result = urlfetch.fetch(url)
print result.content
How can I make it display the page itself? I mean I want to see that page in my browser the way it would normally be seen by any user of the internet.
Update 1:
I see I have received a few questions that look a bit complicated to me, although I definitely remember I was able to do it, and it was very simple, except I don't remember what exactly I changed in this code.
Perhaps I didn't give you all enough details on how I run this code and where I found it. So let me tell you what I did. I only installed Python 2.5 on my computer and then downloaded the "Google App Engine SDK" and installed it, too. Following the instructions on the "GAE" page (http://code.google.com/appengine/docs/python/gettingstarted/helloworld.html) I created a directory and named it "My_test", then I created a "my_test.py" in it containing the small piece of code that I mentioned in my question.
Then, continuing to follow the said instructions, I created an "app.yaml" file in it, in which my "my_test.py" file was mentioned. After that, in "Google App Engine Launcher", I found the "My_test" directory and clicked on the Run button, and then on Browse. Then, having visited http://localhost:8080/ in my web browser, I saw the results.
I definitely remember I was able to display any page in my browser in this way, and it was very simple, except I don't remember what exactly I changed in the code (it was a slight change). Now all I can see is the raw HTML code of a page, but not the page itself.
Update 2:
(this update is my response to wescpy)
Hello, wescpy! I've tried your updated code and something didn't work well there. Perhaps it's because I am not using a certain framework that I am supposed to use for this code. Please take a look at this screenshot:
[screenshot hosted at narod.ru; image no longer available]
It's not that easy: you have to parse the content and adjust relative paths to absolute ones for images and JavaScript.
Anyway, give it a try after adding the correct Content-Type:
from google.appengine.api import urlfetch
url = "http://www.google.com/"
result = urlfetch.fetch(url)
print 'Content-Type: text/html'
print ''
print result.content
a more complete example would look something like this:
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch

class MainHandler(webapp.RequestHandler):
    def get(self):
        url = "http://www.google.com/"
        result = urlfetch.fetch(url)
        self.response.out.write(result.content)

application = webapp.WSGIApplication([
    ('/', MainHandler),
], debug=True)

def main():
    run_wsgi_app(application)

if __name__ == '__main__':
    main()
but as others have said, it's not that easy to do because you're not in the server's domain, meaning the pages will likely not look correct due to missing static content (JS, CSS, and/or images)... unless full pathnames are used or everything that's needed is embedded in the page itself.
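One hedged workaround sketch for the static-content problem: inject a <base> tag so relative URLs resolve against the original site. This is naive string surgery that assumes the fetched page contains a literal <head> tag:
from google.appengine.api import urlfetch

url = "http://www.google.com/"
result = urlfetch.fetch(url)
# make relative links for CSS/JS/images resolve against the original host
content = result.content.replace("<head>", '<head><base href="%s">' % url, 1)
print 'Content-Type: text/html'
print ''
print content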
UPDATE 1:
as mentioned before, you cannot just download the HTML source and expect things to render correctly because you don't necessarily have access to the static data. if you really want to render it as it was meant to be seen, you have to just redirect... here's the modified piece of code:
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch

class MainHandler(webapp.RequestHandler):
    def get(self):
        url = "http://www.google.com/"
        self.redirect(url)

application = webapp.WSGIApplication([
    ('/', MainHandler),
], debug=True)

def main():
    run_wsgi_app(application)

if __name__ == '__main__':
    main()
UPDATE 2:
sorry! it was a cut-n-paste error. now try it.
Special characters such as < and > are likely encoded; you'd have to decode them again for the browser to interpret them as code.
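A small sketch of decoding such entities with the Python 2 stdlib (HTMLParser.unescape is undocumented but present in 2.x):
from HTMLParser import HTMLParser

# turn entity-encoded markup back into literal characters
decoded = HTMLParser().unescape("&lt;html&gt; &amp; friends")
print decoded  # prints: <html> & friends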

Using Python/Selenium/Best Tool For The Job to get URI of image requests generated through JavaScript?

I have some JavaScript from a 3rd party vendor that is initiating an image request. I would like to figure out the URI of this image request.
I can load the page in my browser, and then monitor "Live HTTP Headers" or "Tamper Data" in order to figure out the image request URI, but I would prefer to create a command line process to do this.
My intuition is that it might be possible using python + qtwebkit, but perhaps there is a better way.
To clarify: I might have this (overly simplified code).
<script>
suffix = magicNumberFunctionIDontHaveAccessTo();
url = "http://foobar.com/function?parameter=" + suffix
img = document.createElement('img'); img.src=url; document.all.body.appendChild(img);
</script>
Then once the page is loaded, I can go figure out the url by sniffing the packets. But I can't just figure it out from the source, because I can't predict the outcome of magicNumberFunction...().
Any help would be much appreciated!
Thank you.
The simplest thing to do might be to use something like HtmlUnit and skip a real browser entirely. By using Rhino, it can evaluate JavaScript and likely be used to extract that URL out.
That said, if you can't get that working, try out Selenium RC and use the captureNetworkTraffic command (which requires the Selenium instance to be started with the option captureNetworkTraffic=true). This will launch Firefox with a proxy configured and then let you pull the request info back out as JSON/XML/plain text. Then you can parse that content and get what you want.
Try out the instant test tool that my company offers. If the data you're looking for is in our results (after you click View Details), you'll be able to get it from Selenium. I know, since I wrote the captureNetworkTraffic API for Selenium for my company, BrowserMob.
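A hedged sketch of that flow with the Selenium RC Python client; the method and option names varied across RC versions, so treat these as assumptions rather than a definitive recipe:
from selenium import selenium

sel = selenium("localhost", 4444, "*firefox", "http://example.com")
sel.start("captureNetworkTraffic=true")  # option name assumed from the Java docs
sel.open("/")
# pull the captured request/response info back out; format may be json, xml, or plain
traffic = sel.capture_network_traffic("json")
print traffic
sel.stop()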
I would pick any one of the many http proxy servers written in Python -- probably one of the simplest ones at the very top of the list -- and tweak it to record all URLs requested (as well as proxy-serve them) e.g. appending them to a text file -- without loss of generality, call that text file 'XXX.txt'.
Now all you need is a script that: starts the proxy server in question; starts Firefox (or whatever other browser, which I'm sure would work just as well) on your main desired URL with that proxy configured (see e.g. this SO question for how); waits a bit (e.g. until the proxy's XXX.txt file has not been altered for more than N seconds); reads XXX.txt to extract only the URLs you care about and records them wherever you wish; and shuts down the proxy and Firefox processes.
I think this will be much faster to put in place and make work correctly, for your specific requirements, than any more general solution based on qtwebkit, selenium, or other "automation kits".
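To make that concrete, a minimal URL-logging proxy sketch (Python 3; GET only, no HTTPS CONNECT support, port 8888 chosen arbitrarily; real pages will need one of the fuller proxies mentioned above):
import http.server
import urllib.request

LOG_FILE = "XXX.txt"

class LoggingProxy(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        # when the browser uses us as a proxy, self.path is the absolute URL
        with open(LOG_FILE, "a") as log:
            log.write(self.path + "\n")
        try:
            with urllib.request.urlopen(self.path) as upstream:
                body = upstream.read()
                self.send_response(upstream.status)
                self.send_header("Content-Length", str(len(body)))
                self.end_headers()
                self.wfile.write(body)
        except Exception:
            self.send_error(502)

http.server.HTTPServer(("localhost", 8888), LoggingProxy).serve_forever()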
Use Firebug Firefox plugin. It will show you all requests in real time and you can even debug the JS in your Browser or run it step-by-step.
Ultimately, I did it in Python using Selenium RC. This solution requires the Python files for Selenium RC, and you need to start the Java server ("java -jar selenium-server.jar").
from selenium import selenium
import unittest
import lxml.html

class TestMyDomain(unittest.TestCase):
    def setUp(self):
        self.selenium = selenium("localhost",
                                 4444, "*firefox", "http://www.MyDomain.com")
        self.selenium.start()

    def test_mydomain(self):
        htmldoc = open('site-list.html').read()
        url_list = [link for (element, attribute, link, pos)
                    in lxml.html.iterlinks(htmldoc)]
        for url in url_list:
            try:
                sel = self.selenium
                sel.open(url)
                sel.select_window("null")
                js_code = '''
                myDomainWindow = this.browserbot.getUserWindow();
                for (obj in myDomainWindow) {
                    /* This code grabs the OMNITURE tracking pixel img */
                    if ((obj.substring(0, 4) == 's_i_') && (myDomainWindow[obj].src)) {
                        var ret = myDomainWindow[obj].src;
                    }
                }
                ret;
                '''
                omniture_url = sel.get_eval(js_code)  # parse & process this however you want
            except Exception, e:
                print 'We ran into an error: %s' % (e,)
        self.assertEqual("expectedValue", observedValue)

    def tearDown(self):
        self.selenium.stop()

if __name__ == "__main__":
    unittest.main()
Why can't you just read suffix, or url for that matter? Is the image loaded in an iframe or in your page?
If it is loaded in your page, then this may be a dirty hack (substitute document.body for whatever element is considered):
var ac = document.body.appendChild;
var sources = [];
document.body.appendChild = function(child) {
    if (/^img$/i.test(child.tagName)) {
        sources.push(child.getAttribute('src'));
    }
    // invoke the saved original with the right `this`, or it may throw
    ac.call(document.body, child);
};

How can I take a screenshot/image of a website using Python?

What I want to achieve is to get a website screenshot from any website in python.
Env: Linux
Here is a simple solution using webkit:
http://webscraping.com/blog/Webpage-screenshots-with-webkit/
import sys
import time
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
class Screenshot(QWebView):
    def __init__(self):
        self.app = QApplication(sys.argv)
        QWebView.__init__(self)
        self._loaded = False
        self.loadFinished.connect(self._loadFinished)

    def capture(self, url, output_file):
        self.load(QUrl(url))
        self.wait_load()
        # set to webpage size
        frame = self.page().mainFrame()
        self.page().setViewportSize(frame.contentsSize())
        # render image
        image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
        painter = QPainter(image)
        frame.render(painter)
        painter.end()
        print 'saving', output_file
        image.save(output_file)

    def wait_load(self, delay=0):
        # process app events until page loaded
        while not self._loaded:
            self.app.processEvents()
            time.sleep(delay)
        self._loaded = False

    def _loadFinished(self, result):
        self._loaded = True

s = Screenshot()
s.capture('http://webscraping.com', 'website.png')
s.capture('http://webscraping.com/blog', 'blog.png')
Here is my solution, assembled with help from various sources. It takes a full web page screen capture, optionally crops it, and also generates a thumbnail from the cropped image. Here are the requirements:
Requirements:
Install NodeJS
Using Node's package manager install phantomjs: npm -g install phantomjs
Install selenium (in your virtualenv, if you are using that)
Install imageMagick
Add phantomjs to system path (on windows)
import os
from subprocess import Popen, PIPE
from selenium import webdriver

abspath = lambda *p: os.path.abspath(os.path.join(*p))
ROOT = abspath(os.path.dirname(__file__))

def execute_command(command):
    result = Popen(command, shell=True, stdout=PIPE).stdout.read()
    if len(result) > 0 and not result.isspace():
        raise Exception(result)

def do_screen_capturing(url, screen_path, width, height):
    print "Capturing screen.."
    driver = webdriver.PhantomJS()
    # it saves the service log file in the same directory;
    # if you want the log file stored elsewhere, initialize the driver as
    # driver = webdriver.PhantomJS(service_log_path='/var/log/phantomjs/ghostdriver.log')
    driver.set_script_timeout(30)
    if width and height:
        driver.set_window_size(width, height)
    driver.get(url)
    driver.save_screenshot(screen_path)

def do_crop(params):
    print "Cropping captured image.."
    command = [
        'convert',
        params['screen_path'],
        '-crop', '%sx%s+0+0' % (params['width'], params['height']),
        params['crop_path']
    ]
    execute_command(' '.join(command))

def do_thumbnail(params):
    print "Generating thumbnail from cropped captured image.."
    command = [
        'convert',
        params['crop_path'],
        '-filter', 'Lanczos',
        '-thumbnail', '%sx%s' % (params['width'], params['height']),
        params['thumbnail_path']
    ]
    execute_command(' '.join(command))

def get_screen_shot(**kwargs):
    url = kwargs['url']
    width = int(kwargs.get('width', 1024))  # screen width to capture
    height = int(kwargs.get('height', 768))  # screen height to capture
    filename = kwargs.get('filename', 'screen.png')  # file name e.g. screen.png
    path = kwargs.get('path', ROOT)  # directory path to store screen
    crop = kwargs.get('crop', False)  # crop the captured screen
    crop_width = int(kwargs.get('crop_width', width))  # the width of the cropped screen
    crop_height = int(kwargs.get('crop_height', height))  # the height of the cropped screen
    crop_replace = kwargs.get('crop_replace', False)  # does the crop replace the original capture?
    thumbnail = kwargs.get('thumbnail', False)  # generate thumbnail from screen, requires crop=True
    thumbnail_width = int(kwargs.get('thumbnail_width', width))  # the width of the thumbnail
    thumbnail_height = int(kwargs.get('thumbnail_height', height))  # the height of the thumbnail
    thumbnail_replace = kwargs.get('thumbnail_replace', False)  # does the thumbnail replace the crop?

    screen_path = abspath(path, filename)
    crop_path = thumbnail_path = screen_path

    if thumbnail and not crop:
        raise Exception, 'Thumbnail generation requires a cropped image, set crop=True'

    do_screen_capturing(url, screen_path, width, height)

    if crop:
        if not crop_replace:
            crop_path = abspath(path, 'crop_' + filename)
        params = {
            'width': crop_width, 'height': crop_height,
            'crop_path': crop_path, 'screen_path': screen_path}
        do_crop(params)

        if thumbnail:
            if not thumbnail_replace:
                thumbnail_path = abspath(path, 'thumbnail_' + filename)
            params = {
                'width': thumbnail_width, 'height': thumbnail_height,
                'thumbnail_path': thumbnail_path, 'crop_path': crop_path}
            do_thumbnail(params)

    return screen_path, crop_path, thumbnail_path

if __name__ == '__main__':
    '''
    Requirements:
    Install NodeJS
    Using Node's package manager install phantomjs: npm -g install phantomjs
    Install selenium (in your virtualenv, if you are using that)
    Install imageMagick
    Add phantomjs to system path (on windows)
    '''
    url = 'http://stackoverflow.com/questions/1197172/how-can-i-take-a-screenshot-image-of-a-website-using-python'
    screen_path, crop_path, thumbnail_path = get_screen_shot(
        url=url, filename='sof.png',
        crop=True, crop_replace=False,
        thumbnail=True, thumbnail_replace=False,
        thumbnail_width=200, thumbnail_height=150,
    )
These are the generated images (not reproduced here): the full web page screen, the cropped image from the captured screen, and the thumbnail of the cropped image.
You can do it using Selenium:
from selenium import webdriver
DRIVER = 'chromedriver'
driver = webdriver.Chrome(DRIVER)
driver.get('https://www.spotify.com')
screenshot = driver.save_screenshot('my_screenshot.png')
driver.quit()
https://sites.google.com/a/chromium.org/chromedriver/getting-started
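A hedged variant of the same idea for newer Selenium (4.x, which manages the driver binary itself), running Chrome headless so the capture works without a visible window:
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument("--headless")  # no visible browser window
driver = webdriver.Chrome(options=options)
driver.get("https://www.spotify.com")
driver.save_screenshot("my_screenshot.png")
driver.quit()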
On the Mac, there's webkit2png and on Linux+KDE, you can use khtml2png. I've tried the former and it works quite well, and heard of the latter being put to use.
I recently came across QtWebKit which claims to be cross platform (Qt rolled WebKit into their library, I guess). But I've never tried it, so I can't tell you much more.
The QtWebKit link shows how to access it from Python. You should be able to at least use subprocess to do the same with the others.
11 years later...
Taking a website screenshot using Python3.6 and Google PageSpeedApi Insights v5:
import base64
import requests
import traceback
import urllib.parse as ul
# It's possible to make requests without the api key, but the number of requests is very limited
url = "https://duckgo.com"
urle = ul.quote_plus(url)
image_path = "duckgo.jpg"
key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
strategy = "desktop" # "mobile"
u = f"https://www.googleapis.com/pagespeedonline/v5/runPagespeed?key={key}&strategy={strategy}&url={urle}"
try:
    j = requests.get(u).json()
    ss_encoded = j['lighthouseResult']['audits']['final-screenshot']['details']['data'].replace("data:image/jpeg;base64,", "")
    ss_decoded = base64.b64decode(ss_encoded)
    with open(image_path, 'wb+') as f:
        f.write(ss_decoded)
except:
    print(traceback.format_exc())
    exit(1)
Notes:
Live Demo
Pros: Free
Cons: Low Resolution
Get API Key
Docs
Limits:
Queries per day = 25,000
Queries per 100 seconds = 400
Using Rendertron is an option. Under the hood, this is a headless Chrome exposing the following endpoints:
/render/:url: Access this route e.g. with requests.get if you are interested in the DOM.
/screenshot/:url: Access this route if you are interested in a screenshot.
You would install rendertron with npm, run rendertron in one terminal, access http://localhost:3000/screenshot/:url and save the file, but a demo is available at render-tron.appspot.com making it possible to run this Python3 snippet locally without installing the npm package:
import requests
BASE = 'https://render-tron.appspot.com/screenshot/'
url = 'https://google.com'
path = 'target.jpg'
response = requests.get(BASE + url, stream=True)
# save file, see https://stackoverflow.com/a/13137873/7665691
if response.status_code == 200:
    with open(path, 'wb') as file:
        for chunk in response:
            file.write(chunk)
I can't comment on ars's answer, but I actually got Roland Tapken's code running using QtWebkit and it works quite well.
Just wanted to confirm that what Roland posts on his blog works great on Ubuntu. Our production version ended up not using any of what he wrote but we are using the PyQt/QtWebKit bindings with much success.
Note: The URL used to be: http://www.blogs.uni-osnabrueck.de/rotapken/2008/12/03/create-screenshots-of-a-web-page-using-python-and-qtwebkit/ I've updated it with a working copy.
This is an old question and most answers are a bit dated.
Currently, I would do 1 of 2 things.
1. Create a program that takes the screenshots
I would use Pyppeteer to take screenshots of websites. This runs on the Puppeteer package. Puppeteer spins up a headless Chrome browser, so the screenshots will look exactly like they would in a normal browser.
This is taken from the pyppeteer documentation:
import asyncio
from pyppeteer import launch
async def main():
    browser = await launch()
    page = await browser.newPage()
    await page.goto('https://example.com')
    await page.screenshot({'path': 'example.png'})
    await browser.close()

asyncio.get_event_loop().run_until_complete(main())
2. Use a screenshot API
You could also use a screenshot API such as this one.
The nice thing is that you don't have to set everything up yourself but can simply call an API endpoint.
This is taken from the screenshot API's documentation:
import urllib.parse
import urllib.request
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# The parameters.
token = "YOUR_API_TOKEN"
url = urllib.parse.quote_plus("https://example.com")
width = 1920
height = 1080
output = "image"
# Create the query URL.
query = "https://screenshotapi.net/api/v1/screenshot"
query += "?token=%s&url=%s&width=%d&height=%d&output=%s" % (token, url, width, height, output)
# Call the API.
urllib.request.urlretrieve(query, "./example.png")
This uses the web service s-shot.ru (so it's not so fast), but it's quite easy to configure what you need through the link parameters, and you can easily capture full-page screenshots.
import requests
import urllib.parse
BASE = 'https://mini.s-shot.ru/1024x0/JPEG/1024/Z100/?' # you can modify size, format, zoom
url = 'https://stackoverflow.com/'#or whatever link you need
url = urllib.parse.quote_plus(url) #service needs link to be joined in encoded format
print(url)
path = 'target1.jpg'
response = requests.get(BASE + url, stream=True)
if response.status_code == 200:
    with open(path, 'wb') as file:
        for chunk in response:
            file.write(chunk)
You can use the Google Page Speed API to achieve your task easily. In my current project, I have used a Google Page Speed API query written in Python to capture screenshots of any Web URL provided and save them to a location. Have a look.
import urllib2
import json
import base64
import sys
import requests
import os
import errno
# The website's URL as an Input
site = sys.argv[1]
imagePath = sys.argv[2]
# The Google API. Remove "&strategy=mobile" for a desktop screenshot
api = "https://www.googleapis.com/pagespeedonline/v1/runPagespeed?screenshot=true&strategy=mobile&url=" + urllib2.quote(site)
# Get the results from Google
try:
    site_data = json.load(urllib2.urlopen(api))
except urllib2.URLError:
    print "Unable to retrieve data"
    sys.exit()

try:
    screenshot_encoded = site_data['screenshot']['data']
except ValueError:
    print "Invalid JSON encountered."
    sys.exit()

# Google has a weird way of encoding the Base64 data
screenshot_encoded = screenshot_encoded.replace("_", "/")
screenshot_encoded = screenshot_encoded.replace("-", "+")

# Decode the Base64 data
screenshot_decoded = base64.b64decode(screenshot_encoded)

if not os.path.exists(os.path.dirname(imagePath)):
    try:
        os.makedirs(os.path.dirname(imagePath))
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise

# Save the file
with open(imagePath, 'w') as file_:
    file_.write(screenshot_decoded)
Unfortunately, it has the following drawbacks. If these do not matter to you, you can proceed with the Google Page Speed API. It works well.
The maximum width is 320px
According to Google API Quota, there is a limit of 25,000 requests per day
You don't mention what environment you're running in, which makes a big difference because there isn't a pure Python web browser that's capable of rendering HTML.
But if you're using a Mac, I've used webkit2png with great success. If not, as others have pointed out there are plenty of options.
I created a library called pywebcapture that wraps selenium and will do just that:
pip install pywebcapture
Once you install with pip, you can do the following to easily get full size screenshots:
# import modules
from pywebcapture import loader, driver
# load csv with urls
csv_file = loader.CSVLoader("csv_file_with_urls.csv", has_header_bool, url_column, optional_filename_column)
uri_dict = csv_file.get_uri_dict()
# create instance of the driver and run
d = driver.Driver("path/to/webdriver/", output_filepath, delay, uri_dict)
d.run()
Enjoy!
https://pypi.org/project/pywebcapture/
Try this:
#!/usr/bin/env python
import gtk.gdk
import time
import random
while 1:
    # generate a random time between 120 and 300 sec
    random_time = random.randrange(120, 300)
    # wait between 120 and 300 seconds (or between 2 and 5 minutes)
    print "Next picture in: %.2f minutes" % (float(random_time) / 60)
    time.sleep(random_time)
    w = gtk.gdk.get_default_root_window()
    sz = w.get_size()
    print "The size of the window is %d x %d" % sz
    pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1])
    pb = pb.get_from_drawable(w, w.get_colormap(), 0, 0, 0, 0, sz[0], sz[1])
    ts = time.time()
    filename = "screenshot"
    filename += str(ts)
    filename += ".png"
    if pb != None:
        pb.save(filename, "png")
        print "Screenshot saved to " + filename
    else:
        print "Unable to get the screenshot."
import subprocess

def screenshots(url, name):
    subprocess.run('webkit2png -F -o {} {} -D ./screens'.format(name, url),
                   shell=True)
