I'm new to Python and I want this code to run only once and then stop, not repeat every 30 seconds,
because I want to run multiple scripts like this with different access tokens every 5 seconds from the command line.
When I tried this code it never jumps to the second script, because it's a while True loop:
import requests
import time

api_url = "https://graph.facebook.com/v2.9/"
access_token = "access token"
graph_url = "site url"

post_data = {'id': graph_url, 'scrape': True, 'access_token': access_token}

# Beware of rate limiting if trying to increase frequency.
refresh_rate = 30  # refresh rate in seconds

while True:
    try:
        resp = requests.post(api_url, data=post_data)
        if resp.status_code == 200:
            contents = resp.json()
            print(contents['title'])
        else:
            error = "Warning: Status Code {}\n{}\n".format(
                resp.status_code, resp.content)
            print(error)
            raise RuntimeWarning(error)
    except Exception as e:
        f = open("open_graph_refresher.log", "a")
        f.write("{} : {}".format(type(e), e))
        f.close()
        print(e)
    time.sleep(refresh_rate)
From what I understood, you're trying to execute the same piece of code for multiple access tokens. To keep things simple, put all your access tokens in a list and use the following code. It assumes that you know all your access tokens in advance.
import requests
import time

def scrape_facebook(api_url, access_token, graph_url):
    """Scrape the given graph URL using the given access token."""
    post_data = {'id': graph_url, 'scrape': True, 'access_token': access_token}
    try:
        resp = requests.post(api_url, data=post_data)
        if resp.status_code == 200:
            contents = resp.json()
            print(contents['title'])
        else:
            error = "Warning: Status Code {}\n{}\n".format(
                resp.status_code, resp.content)
            print(error)
            raise RuntimeWarning(error)
    except Exception as e:
        f = open(access_token + "_" + "open_graph_refresher.log", "a")
        f.write("{} : {}".format(type(e), e))
        f.close()
        print(e)

access_token = ['a', 'b', 'c']
graph_url = ['sss', 'xxx', 'ppp']
api_url = "https://graph.facebook.com/v2.9/"

for n in range(len(graph_url)):
    scrape_facebook(api_url, access_token[n], graph_url[n])
    time.sleep(5)
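If the goal is instead to have each run execute exactly once so that something external controls the timing, a minimal one-shot sketch (the file name and argument handling here are assumptions, not from the original answer) could drop the loop and read the token and URL from the command line:

import sys
import requests

# Hypothetical one-shot runner: invoke as
#   python refresh_once.py <access_token> <graph_url>
# so an external scheduler (cron, a shell loop, etc.) controls the timing.
def main():
    if len(sys.argv) != 3:
        print("usage: refresh_once.py <access_token> <graph_url>")
        sys.exit(1)
    api_url = "https://graph.facebook.com/v2.9/"
    access_token, graph_url = sys.argv[1], sys.argv[2]
    post_data = {'id': graph_url, 'scrape': True, 'access_token': access_token}
    resp = requests.post(api_url, data=post_data)  # runs once, then the script exits
    print(resp.status_code)

if __name__ == "__main__":
    main()

A shell loop or cron job can then start it once per token every 5 seconds, which avoids the while True entirely.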
import requests
import json
import threading

data = {
    "amount": 2
}

def foo(data):
    try:
        r = requests.post(url="www.mysite.com", data=data)
        j = json.loads(r.text)
        print(j)
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

threading.Timer(1, foo, [data]).start()
I want to run this HTTP request every second using a thread in my program. However, the program only runs the HTTP request once and exits. How do I fix this?
You need to restart the timer after each request:

def foo(data):
    try:
        r = requests.post(url="www.mysite.com", data=data)
        j = json.loads(r.text)
        print(j)
        threading.Timer(1, foo, [data]).start()  # New line added: schedule the next run
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)
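Each threading.Timer fires only once, which is why rescheduling inside foo keeps the requests going. If you also need a clean way to stop the polling later, one alternative sketch (names like stop_event and poll_forever are illustrative, not from the original) uses a plain thread driven by an Event instead of chained timers:

import threading
import requests

stop_event = threading.Event()  # set this from anywhere to stop the loop

def poll_forever(data):
    # Repeat until stop_event is set, waiting 1 second between requests.
    while not stop_event.is_set():
        try:
            r = requests.post(url="www.mysite.com", data=data)
            print(r.text)
        except requests.exceptions.RequestException as e:
            print(e)
            break
        stop_event.wait(1)  # sleeps 1s, but wakes early if stop_event is set

threading.Thread(target=poll_forever, args=({"amount": 2},), daemon=True).start()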
Here I have a rate stream that outputs the following, and I'm looking to print only the "bid" price. Could someone help explain how I can parse the output correctly? It's driving me crazy!
example = 1.05653
I need the output without quotes or any other markup as well.
JSON
{
    "tick": {
        "instrument": "EUR_USD",
        "time": "2015-04-13T14:28:26.123314Z",
        "bid": 1.05653,
        "ask": 1.05669
    }
}
My code:
import requests
import json
from optparse import OptionParser

def connect_to_stream():
    """
    Environment           <Domain>
    fxTrade               stream-fxtrade.oanda.com
    fxTrade Practice      stream-fxpractice.oanda.com
    sandbox               stream-sandbox.oanda.com
    """

    # Replace the following variables with your personal ones
    domain = 'stream-fxpractice.oanda.com'
    access_token = 'xxxxx'
    account_id = 'xxxxxxxxx'
    instruments = "EUR_USD"

    try:
        s = requests.Session()
        url = "https://" + domain + "/v1/prices"
        headers = {'Authorization': 'Bearer ' + access_token,
                   # 'X-Accept-Datetime-Format' : 'unix'
                   }
        params = {'instruments': instruments, 'accountId': account_id}
        req = requests.Request('GET', url, headers=headers, params=params)
        pre = req.prepare()
        resp = s.send(pre, stream=True, verify=False)
        return resp
    except Exception as e:
        s.close()
        print "Caught exception when connecting to stream\n" + str(e)

def demo(displayHeartbeat):
    response = connect_to_stream()
    if response.status_code != 200:
        print response.text
        return
    for line in response.iter_lines(1):
        if line:
            try:
                msg = json.loads(line)
            except Exception as e:
                print "Caught exception when converting message into json\n" + str(e)
                return
            if msg.has_key("instrument") or msg.has_key("tick"):
                print line
            if displayHeartbeat:
                print line
            else:
                if msg.has_key("instrument") or msg.has_key("tick"):
                    print line

def main():
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("-b", "--displayHeartBeat", dest="verbose", action="store_true",
                      help="Display HeartBeat in streaming data")
    displayHeartbeat = False
    (options, args) = parser.parse_args()
    if len(args) > 1:
        parser.error("incorrect number of arguments")
    if options.verbose:
        displayHeartbeat = True
    demo(displayHeartbeat)

if __name__ == "__main__":
    main()
Sorry if this is an extremely basic question, but I'm not that familiar with Python.
Thanks in advance!
You are iterating over the stream line by line, attempting to parse each line as JSON. Each line alone is not proper JSON, so that's one problem.
I would just run a regex over each line you bring in, looking for the text "bid": followed by a decimal number, and return that number as a float. For example:
import re

for line in response.iter_lines(1):
    matches = re.findall(r'\"bid\"\:\s(\d*\.\d*)', line)
    if len(matches) > 0:
        print float(matches[0])
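As a quick sanity check, the same pattern can be exercised against the sample tick from the question (this snippet is illustrative, not part of the original answer):

import re

sample = '{"tick": {"instrument": "EUR_USD", "bid": 1.05653, "ask": 1.05669}}'
matches = re.findall(r'\"bid\"\:\s(\d*\.\d*)', sample)
if matches:
    print float(matches[0])  # 1.05653, with no quotes or other markup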
Try something along the lines of this:

def demo(displayHeartbeat):
    response = connect_to_stream()
    for line in response.iter_lines():
        if line.startswith(" \"bid\""):  # matches only if the stream pretty-prints with this exact leading whitespace
            print "bid:" + line.split(":")[1]
This actually turned out to be pretty easy; I fixed it by replacing the demo function with this:
def demo(displayHeartbeat):
    response = connect_to_stream()
    if response.status_code != 200:
        print response.text
        return
    for line in response.iter_lines(1):
        if line:
            try:
                msg = json.loads(line)
            except Exception as e:
                print "Caught exception when converting message into json\n" + str(e)
                return
            if displayHeartbeat:
                print line
            else:
                if msg.has_key("instrument") or msg.has_key("tick"):
                    print msg["tick"]["ask"] - .001
                    instrument = msg["tick"]["instrument"]
                    time = msg["tick"]["time"]
                    bid = msg["tick"]["bid"]
                    ask = msg["tick"]["ask"]
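If the output should be strictly the bid with no markup, the final branch above could be reduced to a single lookup, e.g. (sketch based on the code above):

            if msg.has_key("tick"):
                print msg["tick"]["bid"]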
I tried to get the Icecast metadata of an MP3 stream with this script:
import requests

url = 'http://stream.jam.fm/jamfm-nmr/mp3-128/konsole/'

try:
    response = requests.get(url, headers={'Icy-MetaData': 1}, stream=True)
    response.raise_for_status()
except requests.RequestException, e:
    print 'Error:', e
else:
    headers, stream = response.headers, response.raw
    meta_int = headers.get('icy-metaint')
    if meta_int is not None:
        audio_length = int(meta_int)
        while True:
            try:
                audio_data = stream.read(audio_length)
                meta_byte = stream.read(1)
                if (meta_byte):
                    meta_length = ord(meta_byte) * 16
                    meta_data = stream.read(meta_length)
                    print meta_data
            except KeyboardInterrupt:
                break
    response.close()
This works, but just for the first block. I never receive an update on the title information when the track changes. My question is: is this intended behavior, i.e. is the track info just sent once, or did I do something wrong? I would like to be able to notice a track change without polling the stream from time to time.
while True:
    try:
        # New request
        response = requests.get(url, headers={'Icy-MetaData': 1}, stream=True)
        response.raise_for_status()
        headers, stream = response.headers, response.raw
        meta_int = headers.get('icy-metaint')
        audio_length = int(meta_int)  # number of audio bytes between metadata blocks
        audio_data = stream.read(audio_length)
        meta_byte = stream.read(1)
        if (meta_byte):
            meta_length = ord(meta_byte) * 16
            meta_data = stream.read(meta_length)
            print (meta_data)
    except KeyboardInterrupt:
        break
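To actually detect a track change, the raw metadata block can be reduced to just the title. Shoutcast/Icecast metadata conventionally looks like StreamTitle='Artist - Title'; padded with NUL bytes, so a hedged sketch (the function name is illustrative) would be:

import re

def extract_stream_title(meta_data):
    # meta_data is the raw metadata block read from the stream,
    # e.g. "StreamTitle='Artist - Title';StreamUrl='';\x00\x00..."
    match = re.search(r"StreamTitle='([^']*)';", meta_data)
    if match:
        return match.group(1)
    return None

Comparing successive titles then reveals a track change without inspecting the raw bytes by hand.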
So I have this code that creates an output in Excel.
What I want to do now is get the parameter (lid) in payload to loop through a list of other IDs.
This list is stored in a txt file.
Can anyone modify my code to show me how to do that, please?
The text file has the values
1654,
3457,
4327,
1234
(I can also hard-code these somewhere in the script if that is easier.)
from __future__ import print_function
import sys
import csv
import collections
import itertools
import logging

try:
    import requests
    from requests import exceptions
    import base64
    import json
except ImportError as e:
    import requests
    from requests import exceptions
    import base64
    import json
    print("Import Error: %s" % e)

logger = logging.getLogger(__name__)  # basic logger for the I/O error handler below

API_TOKEN = u''
b64token = base64.b64encode(bytes(API_TOKEN))
REST_BASE_URL = u'https://visdasa.dsds.com/rest/'

# API URL request examples (choose one)
REST_URL = u'rawdata/'

FULL_URL = REST_BASE_URL + REST_URL

def retrieve_data(api_url):
    try:
        # Connect to the API and retrieve data
        bauth_header = {'Authorization': 'Basic ' + b64token.decode('UTF-8')}
        payload = {'start': '2014-08-01T00:00:01', 'stop': '2014-08-01T23:59:59', 'category': 'ots', 'lid': '9263'}
        response = requests.get(api_url, headers=bauth_header, params=payload)
        # Check the api response
        if response.status_code == requests.codes.ok:
            # Convert from json data
            json_data = json.loads(response.text)
            Header_String = ("ID", "Site Name", "Network ID", "Network Label", "Company Branch ID",
                             "Company Label", "Count", "timestamp", "ots_duration", "notsure1", "notsure2")
            for location_row in json_data["data"]["locations"]:
                Location_string = (location_row["id"], location_row["label"],
                                   location_row["site"]["network"]["id"],
                                   location_row["site"]["network"]["label"],
                                   location_row["site"]["id"], location_row["site"]["label"])
            try:
                with open('C:\\Users\\teddy\\Desktop\\party\\test.csv', 'w') as wFile:
                    writer = csv.writer(wFile, delimiter=',')
                    writer.writerow(Header_String)  # write the header row (csv writers only have writerow/writerows)
                    for row in json_data["data"]["raw_data"]:
                        writer.writerow(row)
            except IOError as e:
                logger.error("I/O error({0}): {1}".format(e.errno, e.strerror))
                print("I/O error({0}): {1}".format(e.errno, e.strerror))
        else:
            json_data = json.loads(response.text)
            # If the api call was not successful, raise an error
            raise requests.RequestException("Error with the api. Status code : %i \n Json response: %s"
                                            % (response.status_code, json_data))
    except (requests.exceptions.ProxyError, requests.RequestException) as e:
        print(e)

def main():
    # retrieve_data(FULL_URL, PROXY_SETTINGS)
    retrieve_data(FULL_URL)
    sys.exit()

if __name__ == '__main__':
    main()
Why not just pass each lid value as a parameter to your retrieve_data function?
def retrieve_data(api_url):
would become
def retrieve_data(api_url, lid_value):
You would remove the hardcoded lid section of your payload, so the payload would look like this:
payload = {'start': '2014-08-01T00:00:01', 'stop': '2014-08-01T23:59:59', 'category': 'ots'}
Then on the next line you can add:
payload['lid'] = lid_value
In your main function you could then loop through the values in the text file. Here is a simple loop with a list.
def main():
    lid_values = ['1654', '3457', '4327', '1234']
    for lid in lid_values:
        retrieve_data(FULL_URL, lid)
    sys.exit()
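Since the IDs actually live in a txt file with comma-separated values, a small sketch for reading them (assuming the modified retrieve_data above; the file name lids.txt is an assumption) could replace the hard-coded list:

def read_lids(path):
    # Reads IDs from a file laid out like "1654,\n3457,\n...", tolerating
    # trailing commas and blank lines.
    with open(path) as f:
        return [tok.strip() for tok in f.read().split(',') if tok.strip()]

def main():
    for lid in read_lids('lids.txt'):  # 'lids.txt' is an assumed file name
        retrieve_data(FULL_URL, lid)
    sys.exit()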
def do_request(url, token, json_data=None,
               mode="get", work_around_for_image_custom_list=False):
    """Uploads a file. """
    header_collection = {"X-Auth-Token": token}
    if json_data is not None:
        header_collection['Content-Type'] = 'application/json'
    try:
        if mode == "delete":
            # this looks ugly, but there is absolutely no way to
            # get requests to do DELETE when there is a blank JSON
            # included
            r = requests.delete(url, headers=header_collection, timeout=10)
        else:
            r = getattr(requests, mode)(url, data=json.dumps(json_data),
                                        headers=header_collection, timeout=10)
        if r.status_code == 200:
            # This looks ugly also, but has to be for a particular function that calls it
            if work_around_for_image_custom_list:
                return r
            else:
                http_info = (json.dumps(r.json(), indent=2), r.status_code)
        else:
            http_info = (r.text, r.status_code)
        return http_info
    except requests.exceptions.ConnectionError:
        print "Connection Error! Http status Code {}".format(r.status_code)
        sys.exit()
    except (requests.exceptions.RequestException,
            requests.exceptions.HTTPError):
        print "Ambiguous Error! Http status Code {}".format(r.status_code)
        sys.exit()
Using Python 2.7 and the requests module, I have this function that I call from several other functions to make API calls. However, I have to make an exception for one particular function and return the request object if work_around_for_image_custom_list is True. This seems like an ugly hack/workaround, and I am wondering how I could rewrite it to compensate for work_around_for_image_custom_list. For instance, would it be better to make this a class and have each function create an object to use it? If so, how would I override if r.status_code == 200:?
Expanding on the comment I made:
def do_raw_request(url, token, json_data=None, mode="get"):
    """Uploads a file. """
    header_collection = {"X-Auth-Token": token}
    if json_data is not None:
        header_collection['Content-Type'] = 'application/json'
    try:
        if mode == "delete":
            # this looks ugly, but there is absolutely no way to
            # get requests to do DELETE when there is a blank JSON
            # included
            r = requests.delete(url, headers=header_collection, timeout=10)
        else:
            r = getattr(requests, mode)(url, data=json.dumps(json_data),
                                        headers=header_collection, timeout=10)
        if r.status_code == 200:
            return r, r.status_code
        return r.text, r.status_code
    except requests.exceptions.ConnectionError:
        print "Connection Error! Http status Code {}".format(r.status_code)
        sys.exit()
    except (requests.exceptions.RequestException,
            requests.exceptions.HTTPError):
        print "Ambiguous Error! Http status Code {}".format(r.status_code)
        sys.exit()
Then:
def do_request(url, token, json_data=None, mode="get"):
    res, code = do_raw_request(url, token, json_data, mode)
    if code == 200:
        return (json.dumps(res.json(), indent=2), code)
    return res, code
and now you call either do_raw_request or do_request as appropriate.
Note that I changed the return so it always returns a tuple; otherwise you would have to start checking types to know whether you have a status text or a response object.
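For completeness, a short usage sketch (the URL and token are placeholders, not from the original) showing how the two entry points divide the work:

# Callers that need the raw response object (e.g. the image custom list
# case) use do_raw_request; everything else keeps calling do_request.
raw_response, code = do_raw_request("https://api.example.com/images", "my-token")
if code == 200:
    print raw_response.headers

pretty_json, code = do_request("https://api.example.com/servers", "my-token")
print pretty_json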