Not able to send a GET request using Python - python

I have some functions in JavaScript that do the string manipulation, and then my backend writes that data to the database.
In short, I have a URL for sending data to the backend; this data is passed to a JavaScript function, JS does the manipulation, and then via an AJAX request I send the manipulated string back to the backend, which updates my database.
I am using the Flask framework.
Here is what I have written so far:
# this url is where I send a GET request
@app.route('/api', methods=['GET'])
def text():
    text = request.args.get('text', '')
    lang = request.args.get('lang', '')
    return render_template('test.html', text=text, lang=lang)
Now JS does the manipulation of the strings and sends an AJAX GET request to the following URL:
@app.route('/files/<text>', methods=['GET'])
def fi(text):
    if request.method == 'GET':
        textValue = text.encode("utf-8")
        INSERT_DB = 'INSERT INTO text (text) VALUES (%s)'
        db = connect_mysql()
        cursor = db.cursor()
        cursor.execute(INSERT_DB, [textValue])
        db.commit()
        cursor.close()
        db.close()
    return ''
Now I check whether the data was saved to the database at the following URL:
@app.route('/test', methods=['GET'])
def test():
    if request.method == 'GET':
        db_select_last = "SELECT text FROM text order by id DESC limit 1"
        db = connect_mysql()
        cursor = db.cursor()
        cursor.execute(db_select_last)
        data = cursor.fetchone()
        cursor.close()
        db.close()
        return (data['text'])
The problem I face is that when I manually hit the URL from the browser it updates the data, but when I send a GET request from Python it doesn't. Why is this so?
Here's how I send a GET request to that URL (main_url is http://fyp-searchall.rhcloud.com/):
>>> import requests
>>> url = 'http://fyp-searchall.rhcloud.com/api?text=some body&lang=marathi'
>>> r = requests.get(url)
But the data does not update. Where am I going wrong?
I have come to know that JS only runs when you have a browser, so what should I do now?

In requests you should pass query parameters as a dictionary, using the params keyword:
import requests
url = 'http://fyp-searchall.rhcloud.com/api'
params = {'text': 'some body', 'lang': 'marathi'}
r = requests.get(url, params=params)
See the Quickstart here: http://docs.python-requests.org/en/master/user/quickstart/
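Two things worth checking on top of that fix (a minimal sketch; the manipulated string here is a hypothetical stand-in for whatever your JS produces): requests builds and percent-encodes the query string for you, and it never executes the page's JavaScript, so the /files/<text> route that actually performs the INSERT is only reached if you call it yourself.
import requests

main_url = 'http://fyp-searchall.rhcloud.com/'

# /api only renders test.html; it never touches the database.
r = requests.get(main_url + 'api', params={'text': 'some body', 'lang': 'marathi'})
print(r.url)          # the properly encoded URL requests built for you
print(r.status_code)  # 200 only means /api answered, not that anything was stored

# The INSERT happens in the /files/<text> route, which your browser-side JS calls.
# requests never runs that JavaScript, so to test the write from Python you have
# to call that route yourself. The manipulated string below is hypothetical:
# whatever your JS function would have produced.
manipulated = 'some body'
r2 = requests.get(main_url + 'files/' + requests.utils.quote(manipulated))
print(r2.status_code)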

Related

Could not process parameters: str(https://help.twitter.com/using-twitter/twitter-supported-browsers), it must be of type list, tuple or dict

I'm trying to make a multi-threaded web crawler using Python, but I'm getting this error!
import threading
import requests
from bs4 import BeautifulSoup
import mysql.connector

# Function to extract URLs that start with http or https
def extract_urls(url):
    # Make a request to the URL
    response = requests.get(url)
    # Parse the HTML content
    soup = BeautifulSoup(response.text, 'html.parser')
    # Find all the links in the HTML
    links = soup.find_all('a')
    # Extract the URLs that start with http or https
    urls = [link['href'] for link in links
            if link['href'].startswith('http') or link['href'].startswith('https')]
    # Return the list of URLs
    return urls

# Function to crawl a URL using a separate thread
def crawl(url):
    # Extract the URLs from the given URL
    urls = extract_urls(url)
    # Connect to the database
    conn = mysql.connector.connect(
        host='localhost',
        user='root',
        password='',
        database='crawler'
    )
    # Create a cursor
    cursor = conn.cursor()
    # Insert the data from the array into the table
    sql_insert_query = 'INSERT INTO my_transport (transport, fee) VALUES (%s, %s)'
    result = cursor.executemany(sql_insert_query, urls)
    # Commit the changes to the database
    conn.commit()
    # Close the connection
    conn.close()
    # Print the URLs
    print(urls)

# Create a new thread for each URL
urls = ['https://www.facebook.com/', 'https://www.twitter.com/']
threads = []
for url in urls:
    thread = threading.Thread(target=crawl, args=(url,))
    thread.start()
    threads.append(thread)

# Wait for all threads to complete
for thread in threads:
    thread.join()
mysql.connector.errors.ProgrammingError: Could not process parameters: str(https://help.twitter.com/using-twitter/twitter-supported-browsers),
it must be of type list, tuple or dict
I haven't figured out the error; can somebody help me? I'm new to Python.
result = cursor.executemany(sql_insert_query, urls)
executemany expects a collection of parameter rows, where each row is a list, tuple, or dict. Your urls variable is a list of plain strings, so each "row" ends up being a single string, which is exactly what the error message complains about. Your query also has two placeholders (transport, fee), so each row needs two values.
MySQL Connector Documentation | executemany
Make sure you are passing a collection of rows.
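A minimal sketch of the fix, assuming you only want to store each URL and fill the fee column with a placeholder value of 0 (that value is an assumption, not something from your schema):
# Build one (transport, fee) tuple per URL; fee=0 is a placeholder assumption.
rows = [(u, 0) for u in urls]

sql_insert_query = 'INSERT INTO my_transport (transport, fee) VALUES (%s, %s)'
cursor.executemany(sql_insert_query, rows)
conn.commit()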

API Request - Store JSON - Analyze it later

I want to GET a JSON response from an API and then save those values for comparison and for use in another API request. First, I am having trouble saving the data.
def get_sma(ticker_symbol, API):
    url = f"https://api.twelvedata.com/sma?symbol={ticker_symbol}&interval=1day&time_period=200&outputsize=1&apikey={api_key}"
    response = requests.get(url).json()
    return response
Using this method, how do I recall the response? Is it by using "get_sma"?
I don't have a key, but here is how to clean up the code you posted.
import requests

def get_sma(ticker_symbol, api_key):
    url = f"https://api.twelvedata.com/sma?symbol={ticker_symbol}&interval=1day&time_period=200&outputsize=1&apikey={api_key}"
    response = requests.get(url).json()
    return response

aapl_response = get_sma("AAPL", None)
print(aapl_response)

amzn_response = get_sma("AMZN", None)
print(amzn_response)
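To keep the values around for later comparison, one option is to hold the responses in a dict keyed by symbol and dump them to a JSON file. A minimal sketch; the sma_cache.json filename and the placeholder API key are illustrative, not from the Twelve Data docs:
import json

# Collect responses keyed by ticker so they can be compared later.
responses = {
    "AAPL": get_sma("AAPL", "YOUR_API_KEY"),
    "AMZN": get_sma("AMZN", "YOUR_API_KEY"),
}

# Persist to disk for a later run (filename is arbitrary).
with open("sma_cache.json", "w") as f:
    json.dump(responses, f, indent=2)

# Later: reload and compare, or feed values into another request.
with open("sma_cache.json") as f:
    saved = json.load(f)
print(saved["AAPL"])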

Python script to retrieve data from MySQL and post it to web server

I am trying to come up with a Python script to retrieve data from MySQL and post it in JSON format to a web server. I have two separate Python scripts, one for retrieving the data from MySQL and one for posting the data in JSON format. The main issue I am facing is that I do not know how to integrate them.
Code for retrieving data from MySQL:
import MySQLdb

db = MySQLdb.connect("localhost", "root", "12345", "testdatabase")
curs = db.cursor()
curs.execute("SELECT * from mydata")
reading = curs.fetchall()
print "Data Info: %s" % reading
Code for posting to web server:
import json
import urllib2
import requests

data = {
    'ID': 1,
    'Name': 'Bryan',
    'Class': '3A'
}

req = urllib2.Request('http://abcd.com')  # not the actual url
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(data))
I have referenced the codes from the following links:
Retrieve data from MySQL
Retrieve data from MySQL 2nd link
Post to web server
Post to web server 2nd link
Would appreciate any form of assistance.
You can use the connection as a library file.
File: connection.py
import MySQLdb

def db_connect(query):
    db = MySQLdb.connect("localhost", "root", "12345", "testdatabase")
    curs = db.cursor()
    curs.execute(query)
    reading = curs.fetchall()
    return reading
Main file: webserver.py
import json
import urllib2
import requests
import connection

mysql_data = connection.db_connect("SELECT * from mydata")
#data = <Your logic to convert string to json>

req = urllib2.Request('http://abcd.com')  # not the actual url
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(data))
Method 2: you can also try SQLAlchemy, which can give you dict-like data directly out of an SQL query. You can use filters instead of a raw SQL query.
I recommend this way; it is better, and you can go through this link: https://pythonspot.com/en/orm-with-sqlalchemy/
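A minimal SQLAlchemy sketch of that idea, assuming SQLAlchemy 1.4+ and the same MySQL credentials as above (the ORM/filter approach from the linked tutorial would replace the raw query):
from sqlalchemy import create_engine, text

# Connection URL assumes the same MySQL credentials as the MySQLdb example above.
engine = create_engine("mysql+mysqldb://root:12345@localhost/testdatabase")

with engine.connect() as conn:
    result = conn.execute(text("SELECT * FROM mydata"))
    rows = [dict(r) for r in result.mappings()]  # list of dicts, one per row

print(rows)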

DELETE Request in Python "requests" module not working with body

I have been using this function to handle http requests with no problems:
def do_request(self, method, url, **kwargs):
    params = kwargs.get('params', None)
    headers = kwargs.get('headers', None)
    payload = kwargs.get('data', None)
    request_method = {'GET': requests.get, 'POST': requests.post,
                      'PUT': requests.put, 'DELETE': requests.delete}
    request_url = url
    req = request_method[method]
    try:
        res = req(request_url, headers=headers, params=params, data=json.dumps(payload))
    except (requests.exceptions.ConnectionError, requests.exceptions.RequestException) as e:
        data = {'has_error': True, 'error_message': e.message}
        return data
    try:
        data = res.json()
        data.update({'has_error': False, 'error_message': ''})
    except ValueError as e:
        msg = "Cannot read response, %s" % (e.message)
        data = {'has_error': True, 'error_message': msg}
    if not res.ok:
        msg = "Response not ok"
        data.update({'has_error': True, 'error_message': msg})
    if res.status_code >= 400:
        msg = 'Error code: ' + str(res.status_code) + '\n' + data['errorCode']
        data.update({'has_error': True, 'error_message': msg})
    return data
When I do a DELETE request without a body entity I have no problems, but when I try to add one (when required by the server) I get an error message from the server saying that the body cannot be null, as if no body had been sent. Any ideas why this might be happening? I'm using the requests module and Python 2.7.12. As far as I know, data can be sent in a DELETE request. Thanks!
There are problems with some clients and some servers when a DELETE includes an entity body: see "Is an entity body allowed for an HTTP DELETE request?", for example, and lots of other search results.
Some servers (apparently) convert the DELETE into a POST; others simply perform the DELETE but drop the body. In your case, you have verified that the body of a DELETE is indeed dropped by the server, and it has been suggested that you change the DELETE to a POST.
Mmm... I can send a DELETE with a body with Postman and it works OK, but I can't get the same result with Requests 2.17.3.
This is an issue related to Requests.
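For reference, requests itself does let you attach a body to a DELETE; a minimal sketch (the URL and payload below are placeholders):
import json
import requests

url = 'http://example.com/resource/42'  # placeholder URL
payload = {'reason': 'cleanup'}          # placeholder body

# requests.delete passes extra kwargs straight through to requests.request,
# so data= (or json=) works the same way it does for POST/PUT.
res = requests.delete(url,
                      headers={'Content-Type': 'application/json'},
                      data=json.dumps(payload))
print(res.status_code)

# Whether the body actually reaches your handler is up to the server;
# some proxies and frameworks drop the entity body of a DELETE, which is
# the behaviour described in the answer above.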

Advice on automating datamining with Python

I am a biologist with a little programming experience in Python. One of my research methods involves profiling large gene lists using this database: https://david.ncifcrf.gov/
Can anyone advise me on whether it would be possible to do a keyword search of the output and return the gene names associated with the keyword? This is for the "Table" output, which looks like this: https://david.ncifcrf.gov/annotationReport.jsp?annot=59,12,87,88,30,38,46,3,5,55,53,70,79&currentList=0
There are also backend and API options.
All insight and advice is greatly appreciated.
If there is an API which gives you all the data, you can automate almost everything associated with it. APIs are either REST or SOAP, so first you need to figure out which one you are dealing with.
If the API is RESTful:
import urllib2, json

url = "https://mysuperapiurl.com/api-ws/api/port/"
u = 'APIUsername'
p = 'APIPassword'

def encodeUserData(user, password):
    return "Basic " + (user + ":" + password).encode("base64").rstrip()

req = urllib2.Request(url)
req.add_header('Accept', 'application/json')
req.add_header("Content-type", "application/x-www-form-urlencoded")
req.add_header('Authorization', encodeUserData(u, p))
res = urllib2.urlopen(req)
j = json.load(res)        # Here is all the data from the API
json_str = json.dumps(j)  # this is the same as above, as a string
If the API is SOAP, it gets a bit harder. What I recommend is zeep. If that is not possible, because your server runs Python 2.6 or because several people are working on it, then use suds.
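For completeness, a minimal zeep sketch (assuming the same WSDL, credentials, and operation name as the suds example that follows):
import requests
from requests.auth import HTTPBasicAuth
from zeep import Client
from zeep.transports import Transport

session = requests.Session()
session.auth = HTTPBasicAuth('username', 'password')

# WSDL URL and operation name are taken from the suds example below.
client = Client('http://213.166.38.97:8080/SRIManagementWS/services/SRIManagementSOAP?wsdl',
                transport=Transport(session=session))
result = client.service.GetServiceById("argument_1", "argument_2")
print(result)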
with suds an API call looks like this:
import logging, time, requests, re, suds_requests
from datetime import timedelta, date, datetime, tzinfo
from requests.auth import HTTPBasicAuth
from suds.client import Client
from suds.wsse import *
from suds import null, TypeNotFound
from cStringIO import StringIO
from bs4 import BeautifulSoup as Soup

log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.INFO)
logging.getLogger('suds.transport').setLevel(logging.DEBUG)
logging.getLogger('suds.client').setLevel(logging.DEBUG)

WSDL_URL = 'http://213.166.38.97:8080/SRIManagementWS/services/SRIManagementSOAP?wsdl'

username = 'username'
password = 'password'
session = requests.session()
session.auth = (username, password)
client = Client(WSDL_URL)  # suds client built from the WSDL above

def addSecurityHeader(client, username, password):
    security = Security()
    userNameToken = UsernameToken(username, password)
    security.tokens.append(userNameToken)
    client.set_options(wsse=security)

addSecurityHeader(client, username, password)

arg1 = "argument_1"
arg2 = "argument_2"

try:
    client.service.GetServiceById(arg1, arg2)
except TypeNotFound as e:
    print e

logresults = log_stream.getvalue()
You will receive XML in return, so I use BeautifulSoup to prettify the results:
soup = Soup(logresults)
print soup.prettify()
OK, so the API connection part is covered. Where do you store your data, and where do you iterate over it to perform a keyword search? In your database. I recommend MySQLdb. Set up your table and think about which information (that you collect from the API) you're going to store in which column.
import sys
import MySQLdb

def dbconnect():
    try:
        db = MySQLdb.connect(
            host='localhost',
            user='root',
            passwd='password',
            db='mysuperdb'
        )
    except Exception as e:
        sys.exit("Can't connect to database")
    return db

def getSQL():
    db = dbconnect()
    cursor = db.cursor()
    sql = "select * from yoursupertable"
    dta = cursor.execute(sql)
    results = cursor.fetchall()
    return results

def dataResult():
    results = getSQL()
    for column in results:
        id = (column[1])
        print id

dataResult()
So this is where you set your keywords (you could also do it via another SQL query) and compare the results you extract from your database with a list, dict, text file, or hardcoded keywords, and define what to do when they match, etc. :)
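A minimal sketch of that matching step (the column indexes, the keyword list, and the assumption that the gene name sits in one column and the annotation text in another are all illustrative, not taken from the DAVID output format):
keywords = ["kinase", "apoptosis"]  # hardcoded keywords; could come from a file or another query

def keyword_search(rows, keywords):
    # Return (gene_name, keyword) pairs for rows whose annotation text mentions a keyword.
    hits = []
    for row in rows:
        gene_name = row[1]        # assumed column holding the gene name
        annotation = str(row[2])  # assumed column holding the annotation text
        for kw in keywords:
            if kw.lower() in annotation.lower():
                hits.append((gene_name, kw))
    return hits

for gene, kw in keyword_search(getSQL(), keywords):
    print gene, kw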
