Logging into Quora using Python

I tried logging into Quora using Python, but it gives me the following error:
urllib2.HTTPError: HTTP Error 500: Internal Server Error
This is my code so far. I am also working behind a proxy.
import urllib2
import urllib
import re
import cookielib

class Quora:
    def __init__(self):
        '''Initialising and authentication'''
        auth = 'http://name:password@proxy:port'
        cj = cookielib.CookieJar()
        logindata = urllib.urlencode({'email': 'email', 'password': 'password'})
        handler = urllib2.ProxyHandler({'http': auth})
        opener = urllib2.build_opener(handler, urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
        a = urllib2.urlopen('http://www.quora.com/', logindata)

def main():
    Quora()

if __name__ == '__main__':
    main()

Can someone please point out what is wrong?

Try something like this:
import random
import urllib
import urllib2
import cookielib

# Load proxies
proxies = []
proxies_fp = open('proxies.txt', 'r')  # A list of proxies, one per line
for proxy in proxies_fp:
    proxies.append(proxy.strip())

cookiejar = cookielib.CookieJar()

def perform_request(url, opener, credentials):
    # Instantiate our request object
    request = urllib2.Request(url)
    # Perform the request, POSTing the encoded credentials, and return the result
    result = opener.open(request, credentials)
    return result

credentials = {
    'username': 'username',
    'password': 'password'
}
encoded_credentials = urllib.urlencode(credentials)

url = 'http://www.quora.com/'  # page to log in to

def main():
    # Get a random proxy
    proxy = random.choice(proxies)
    # Build an opener that routes through the proxy and keeps cookies
    opener = urllib2.build_opener(
        urllib2.ProxyHandler({'http': proxy}),
        urllib2.HTTPRedirectHandler(),
        urllib2.HTTPHandler(debuglevel=0),
        urllib2.HTTPSHandler(debuglevel=0),
        urllib2.HTTPCookieProcessor(cookiejar),
    )
    urllib2.install_opener(opener)
    a = perform_request(url, opener, encoded_credentials)
--untested--
I've had to do something similar to this, and it worked for me this way. (Please note that this is NOT an exact copy of the code I used; I had to adapt it a bit and did NOT test it.)

Related

How to decline a pull request on Bitbucket from Python?

How do you use Bitbucket's 2.0 API to decline a pull request via Python?
According to their documentation, it should be something like:
import requests
kwargs = {
    'username': MY_BITBUCKET_ACCOUNT,
    'repo_slug': MY_BITBUCKET_REPO,
    'pull_request_id': pull_request_id
}
url = 'https://api.bitbucket.org/2.0/repositories/{username}/{repo_slug}/pullrequests/{pull_request_id}/decline'.format(**kwargs)
headers = {'Content-Type': 'application/json'}
response = requests.post(url, auth=(USERNAME, PASSWORD), headers=headers)
However, this fails with response.text simply saying "Bad Request".
This similar code works for me with their other API endpoints, so I'm not sure why the decline method is failing.
What am I doing wrong?
You have to authenticate with OAuth. I wrote a wrapper for making these requests. Here is a simple example that works. The only thing I couldn't figure out was how to add a reason the PR was declined, so I ended up making a request that added a comment explaining why before I declined the PR.
import os
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session

class Bitbucket(object):
    def __init__(self, client_id, client_secret, workplace, repo_slug):
        self.workplace = workplace  # username or company username
        self.repo_slug = repo_slug
        self.token_url = 'https://bitbucket.org/site/oauth2/access_token'
        self.api_url = 'https://api.bitbucket.org/2.0/'
        self.max_pages = 10
        self.client = BackendApplicationClient(client_id=client_id)
        self.oauth = OAuth2Session(client=self.client)
        self.oauth.fetch_token(
            token_url=self.token_url,
            client_id=client_id,
            client_secret=client_secret
        )

    def get_api_url(self, endpoint):
        return f'{self.api_url}repositories/{self.workplace}/{self.repo_slug}/{endpoint}'

bitbucket = Bitbucket(os.environ['BITBUCKET_KEY'], os.environ['BITBUCKET_SECRET'], workplace='foo', repo_slug='bar')
pr_id = 1234
resp = bitbucket.oauth.post(f"{bitbucket.get_api_url('pullrequests')}/{pr_id}/decline")
if resp.status_code == 200:
    print('Declined')
else:
    print('Something went wrong.')
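For the decline reason mentioned above, here is a hedged, untested sketch of posting a comment to the pull request right before declining it; the comments endpoint path and JSON payload shape are assumptions based on the Bitbucket 2.0 API docs, not something taken from the code above.

# Assumed endpoint and payload shape; check against the current Bitbucket 2.0 docs
comment_url = f"{bitbucket.get_api_url('pullrequests')}/{pr_id}/comments"
comment_resp = bitbucket.oauth.post(
    comment_url,
    json={'content': {'raw': 'Declining: superseded by a newer pull request.'}},
)

# Then decline as shown above
decline_resp = bitbucket.oauth.post(f"{bitbucket.get_api_url('pullrequests')}/{pr_id}/decline")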

Setting proxy to urllib.request (Python3)

How can I set a proxy for the latest urllib in Python 3?
I am doing the following:
from urllib import request as urlrequest
ask = urlrequest.Request(url)  # note that here Request has a capital R, unlike previous versions
open = urlrequest.urlopen(ask)
open.read()
I tried adding a proxy as follows:
ask=urlrequest.Request.set_proxy(ask,proxies,'http')
However, I don't know how correct this is, since I am getting the following error:
336 def set_proxy(self, host, type):
--> 337 if self.type == 'https' and not self._tunnel_host:
338 self._tunnel_host = self.host
339 else:
AttributeError: 'NoneType' object has no attribute 'type'
You should be calling set_proxy() on an instance of class Request, not on the class itself:
from urllib import request as urlrequest
proxy_host = 'localhost:1234' # host and port of your proxy
url = 'http://www.httpbin.org/ip'
req = urlrequest.Request(url)
req.set_proxy(proxy_host, 'http')
response = urlrequest.urlopen(req)
print(response.read().decode('utf8'))
I needed to disable the proxy in our company environment, because I wanted to access a server on localhost. I could not disable the proxy server with the approach from @mhawke (I tried passing {}, None and [] as proxies).
This worked for me (it can also be used for setting a specific proxy; see the comment in the code).
import urllib.request as request
# disable the proxy by passing an empty dict
proxy_handler = request.ProxyHandler({})
# alternatively you could set a proxy for http with
# proxy_handler = request.ProxyHandler({'http': 'http://www.example.com:3128/'})
opener = request.build_opener(proxy_handler)
url = 'http://www.example.org'
# open the website with the opener
req = opener.open(url)
data = req.read().decode('utf8')
print(data)
Urllib will automatically detect proxies set up in the environment - so one can just set the HTTP_PROXY variable either in your environment e.g. for Bash:
export HTTP_PROXY=http://proxy_url:proxy_port
or using Python e.g.
import os
os.environ['HTTP_PROXY'] = 'http://proxy_url:proxy_port'
Note from the urllib docs: "HTTP_PROXY[environment variable] will be ignored if a variable REQUEST_METHOD is set; see the documentation on getproxies()"
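As a quick sanity check, a minimal sketch (the proxy address is a placeholder): urllib.request.getproxies() reports what was picked up from the environment, and a plain urlopen call is then routed through that proxy.

import os
import urllib.request

# Placeholder proxy address; replace with your own
os.environ['HTTP_PROXY'] = 'http://proxy_url:proxy_port'

# Show which proxies urllib detected from the environment
print(urllib.request.getproxies())

# Plain HTTP requests now go through the detected proxy automatically
response = urllib.request.urlopen('http://www.httpbin.org/ip')
print(response.read().decode('utf8'))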
import urllib.request

def set_http_proxy(proxy):
    if proxy is None:  # Use system default setting
        proxy_support = urllib.request.ProxyHandler()
    elif proxy == '':  # Don't use any proxy
        proxy_support = urllib.request.ProxyHandler({})
    else:  # Use the given proxy
        proxy_support = urllib.request.ProxyHandler({'http': proxy, 'https': proxy})
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)

proxy = 'user:pass@ip:port'
set_http_proxy(proxy)
url = 'https://www.httpbin.org/ip'
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
html = response.read()
html

Urllib2 Seems to Ignore Proxy Settings

I'm behind a proxy and would like to use urllib2 to access external sites. If I set up the proxy in my environment, I can access external sites. When I set a proxy in urllib2, it seems to be ignored and the access fails.
The code I'm using is:
import urllib2
import os
import sys

uri = "https://www.python.org"
http_proxy_server = "192.168.12.20"
http_proxy_port = "8082"
http_proxy = "http://%s:%s" % (http_proxy_server, http_proxy_port)

def open_url_no_proxy():
    sys.stdout.write('Proxy (none): ')
    proxy_handler = urllib2.ProxyHandler({})
    opener = urllib2.build_opener(proxy_handler)
    try:
        opener.open(uri)
        sys.stdout.write('PASS\n')
    except urllib2.URLError:
        sys.stdout.write('FAIL\n')

def open_url_system_proxy():
    sys.stdout.write('Proxy (system): ')
    opener = urllib2.build_opener()
    try:
        opener.open(uri)
        sys.stdout.write('PASS\n')
    except urllib2.URLError:
        sys.stdout.write('FAIL\n')

def open_url_installed_opener():
    sys.stdout.write('Proxy (installed): ')
    proxy_handler = urllib2.ProxyHandler({"http": http_proxy})
    opener = urllib2.build_opener(proxy_handler)
    try:
        opener.open(uri)
        sys.stdout.write('PASS\n')
    except urllib2.URLError:
        sys.stdout.write('FAIL\n')

if __name__ == "__main__":
    os.environ['no_proxy'] = 'localhost,127.0.0.1'
    os.environ['NO_PROXY'] = 'localhost,127.0.0.1'
    os.environ['http_proxy'] = http_proxy
    os.environ['HTTP_PROXY'] = http_proxy
    open_url_system_proxy()
    open_url_no_proxy()
    open_url_system_proxy()
    open_url_installed_opener()
    open_url_system_proxy()
The response I get on my system is:
$ python proxytest2.py
Proxy (system): PASS
Proxy (none): FAIL
Proxy (system): PASS
Proxy (installed): FAIL
Proxy (system): PASS
What am I doing wrong?
You've set up only a proxy for HTTP in the line below, but you're accessing an HTTPS site:
proxy_handler = urllib2.ProxyHandler({"http": http_proxy})
You need to modify this to
proxy_handler = urllib2.ProxyHandler({"http": http_proxy, "https": http_proxy})

Logging in to google using python?

I am fairly new to web programming, but I am trying to log in to a Google account from a Python application rather than through the standard browser flow, and I haven't been able to get it to work.
Has anyone tried this before? Can anyone help?
I made a Python class that handles Google login and is then able to fetch any Google service page that requires the user to be logged in:
import requests
from bs4 import BeautifulSoup

class SessionGoogle:
    def __init__(self, url_login, url_auth, login, pwd):
        self.ses = requests.session()
        login_html = self.ses.get(url_login)
        soup_login = BeautifulSoup(login_html.content).find('form').find_all('input')
        my_dict = {}
        for u in soup_login:
            if u.has_attr('value'):
                my_dict[u['name']] = u['value']
        # override the inputs with login and pwd:
        my_dict['Email'] = login
        my_dict['Passwd'] = pwd
        self.ses.post(url_auth, data=my_dict)

    def get(self, URL):
        return self.ses.get(URL).text
The idea is to go to the login page, grab the GALX hidden input value, and send it back to Google together with the login and password. It requires the requests and BeautifulSoup modules.
Example of use:
url_login = "https://accounts.google.com/ServiceLogin"
url_auth = "https://accounts.google.com/ServiceLoginAuth"
session = SessionGoogle(url_login, url_auth, "myGoogleLogin", "myPassword")
print session.get("http://plus.google.com")
Hope this helps
Although probably not exactly what you were looking for, here is some code from a similar post that did run for me.
import urllib2

def get_unread_msgs(user, passwd):
    auth_handler = urllib2.HTTPBasicAuthHandler()
    auth_handler.add_password(
        realm='New mail feed',
        uri='https://mail.google.com',
        user='%s@gmail.com' % user,
        passwd=passwd
    )
    opener = urllib2.build_opener(auth_handler)
    urllib2.install_opener(opener)
    feed = urllib2.urlopen('https://mail.google.com/mail/feed/atom')
    return feed.read()

print get_unread_msgs("put-username-here", "put-password-here")
reference:
How to auto log into gmail atom feed with Python?
2020 update for python 3:
import urllib.request

def unread_messages(user, passwd):
    auth_handler = urllib.request.HTTPBasicAuthHandler()
    auth_handler.add_password(
        realm='New mail feed',
        uri='https://mail.google.com',
        user='%s@gmail.com' % user,
        passwd=passwd
    )
    opener = urllib.request.build_opener(auth_handler)
    urllib.request.install_opener(opener)
    feed = urllib.request.urlopen('https://mail.google.com/mail/feed/atom')
    return feed.read()

print(unread_messages('username', 'password'))
You can use Python's urllib, urllib2 and cookielib libraries to log in.
import urllib, urllib2, cookielib

def test_login():
    username = ''  # Gmail Address
    password = ''  # Gmail Password
    cookie_jar = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))
    login_dict = urllib.urlencode({'username': username, 'password': password})
    opener.open('https://accounts.google.com/ServiceLogin', login_dict)
    response = opener.open('https://plus.google.com/explore')
    print response.read()

if __name__ == '__main__':
    test_login()

How to specify an authenticated proxy for a python http connection?

What's the best way to specify a proxy with username and password for an http connection in python?
This works for me:
import urllib2

proxy = urllib2.ProxyHandler({'http': 'http://username:password@proxyurl:proxyport'})
auth = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
urllib2.install_opener(opener)
conn = urllib2.urlopen('http://python.org')
return_str = conn.read()
Use this:
import requests
proxies = {"http": "http://username:password@proxy_ip:proxy_port"}
r = requests.get("http://www.example.com/", proxies=proxies)
print(r.content)
I think it's much simpler than using urllib. I don't understand why people love using urllib so much.
Set an environment variable named http_proxy like this: http://username:password@proxy_url:port
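A minimal sketch of that approach (placeholder credentials and host; urllib2 reads http_proxy from the environment when no ProxyHandler overrides it):

import os
import urllib2

# Placeholder proxy credentials and address; replace with your own
os.environ['http_proxy'] = 'http://username:password@proxy_url:port'

# urllib2 picks the proxy up from the environment automatically
print urllib2.urlopen('http://www.example.com/').read()[:200]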
The best way of going through a proxy that requires authentication is using urllib2 to build a custom url opener, then using that to make all the requests you want to go through the proxy. Note in particular, you probably don't want to embed the proxy password in the url or the python source code (unless it's just a quick hack).
import urllib2

def get_proxy_opener(proxyurl, proxyuser, proxypass, proxyscheme="http"):
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, proxyurl, proxyuser, proxypass)
    proxy_handler = urllib2.ProxyHandler({proxyscheme: proxyurl})
    proxy_auth_handler = urllib2.ProxyBasicAuthHandler(password_mgr)
    return urllib2.build_opener(proxy_handler, proxy_auth_handler)

if __name__ == "__main__":
    import sys
    if len(sys.argv) > 4:
        url_opener = get_proxy_opener(*sys.argv[1:4])
        for url in sys.argv[4:]:
            print url_opener.open(url).headers
    else:
        print "Usage:", sys.argv[0], "proxy user pass fetchurls..."
In a more complex program, you can separate these components out as appropriate (for instance, only using one password manager for the lifetime of the application), as sketched below. The Python documentation has more examples on how to do complex things with urllib2 that you might also find useful.
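For illustration, a minimal sketch of that separation (the names and proxy details here are made up): one password manager created once and reused by every opener the program builds.

import urllib2

# Hypothetical example: register proxy credentials once, reuse the manager everywhere
shared_password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
shared_password_mgr.add_password(None, 'http://proxyurl:proxyport', 'proxyuser', 'proxypass')

def make_proxy_opener(proxyurl, scheme='http'):
    proxy_handler = urllib2.ProxyHandler({scheme: proxyurl})
    proxy_auth_handler = urllib2.ProxyBasicAuthHandler(shared_password_mgr)
    return urllib2.build_opener(proxy_handler, proxy_auth_handler)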
Or if you want to install it, so that it is always used with urllib2.urlopen (so you don't need to keep a reference to the opener around):
import urllib2
url = 'www.proxyurl.com'
username = 'user'
password = 'pass'
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
# None, with the "WithDefaultRealm" password manager means
# that the user/pass will be used for any realm (where
# there isn't a more specific match).
password_mgr.add_password(None, url, username, password)
auth_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
print urllib2.urlopen("http://www.example.com/folder/page.html").read()
Here is a method using urllib (Python 3's urllib.request):
import urllib.request
# set up authentication info
authinfo = urllib.request.HTTPBasicAuthHandler()
proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"})
# build a new opener that adds authentication and caching FTP handlers
opener = urllib.request.build_opener(proxy_support, authinfo, urllib.request.CacheFTPHandler)
# install it
urllib.request.install_opener(opener)
f = urllib.request.urlopen('http://www.python.org/')
"""
