How can I limit python script to 1 API trade per day? - python

I'm putting together a python script to make trades on poloniex with the API, and so far I've got it to make trades when certain conditions are met, but I still need it to NOT place anymore trades for the rest of that day (I have the entire script looping every 60 seconds).
So far I have this script:
import requests
import urllib.request
import urllib.parse
import http.client
import hashlib
import hmac
import time
import json
from urllib.request import urlopen
The_Currency_Pair = input('Which Currency Pair?\nPAIRS TO CHOOSE FROM:\nUSDT_BTC\nUSDT_XRP\nUSDT_ETH\nUSDT_BCH\nUSDT_STR\nUSDT_LTC\nUSDT_ETC\nUSDT_XMR\n')
api = 'https://poloniex.com/tradingApi'
key = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
secret = 'XXXXXXXXXXXXXXXXXXXXXXXXX'
def main():
poloniexPrices = urlopen('https://poloniex.com/public?command=returnTicker').read()
poloniexjson = json.loads(poloniexPrices)
poloniexlastP = poloniexjson[The_Currency_Pair]['last']
poloniexOCHL = urlopen('https://poloniex.com/public?command=returnChartData&currencyPair=USDT_BCH&start=1538352000&period=86400').read()
poloniexOCHLjson = json.loads(poloniexOCHL)
poloniexlasthigh = poloniexOCHLjson[-2]['high']
print ('Last Price')
print (poloniexlastP)
print ('----------------------------------------')
print ('Last Day High')
print (poloniexlasthigh)
print ('----------------------------------------')
data = {
'command': 'returnBalances',
'nonce' : int(time.time() * 1000)
}
data = urllib.parse.urlencode(data).encode()
signature = hmac.new(secret.encode(), data, hashlib.sha512)
headers = {
'Key' : key,
'Sign': signature.hexdigest()
}
request = urllib.request.Request(
url=api, data=data, headers=headers, method='POST'
)
text = urllib.request.urlopen(request).read().decode()
print ('MY ACCOUNT BALANCE')
try:
print(json.loads(text)['USDT'])
except:
print(text)
print ('-----------------------------------------')
if float(poloniexlastP) > 0:
print ('PLACING TRADE')
print ('-----------------------------------------------')
parms = {"command":"buy",
"currencyPair":The_Currency_Pair,
"rate":100,
"immediateOrCancel":1,
"amount":0.01,
"nonce":int(time.time() * 1000)}
parms = urllib.parse.urlencode(parms).encode()
signature = hmac.new(secret.encode(), parms, hashlib.sha512)
headers = {'Key' : key,
'Sign': signature.hexdigest()}
request = urllib.request.Request(
url=api, data=parms, headers=headers, method='POST')
text = urllib.request.urlopen(request).read().decode()
ordernumber = (json.loads(text)['orderNumber'])
print ('Order Number:')
print (ordernumber)
while True:
main()
time.sleep(60)
Anyway, after a trade has been placed, I need it to make sure that after the 60 second sleep, it doesn't make a second trade unless it is a new day/the day after the trade was made. (Could I use poloniex server time for this?)
So, if it has got as far as print (ordernumber) that means it has placed a trade. But how do I mark it as placed trade for the day or something and use it in the if float(poloniexlastP) > 0: for the next loop to make sure it doesn't place another one?

Maybe you can use Python to get the date, and create a global variable, and after the print statement, you can set the variable to the current date, and the coffee will check if it has already sent, that way it doesn't execute more than once in a day.
import datetime
# This Gets The Day Of The Month
todaysDateNumber = int(datetime.datetime.now().strftime("%d"))
dateNumberTradeSent = 0
if todaysDateNumber == dateNumberTradeSent:
print("The API has already been used once today, try again tomorrow!")
return
else:
# Call Your Trade Sending Code Here
# After The Print Statement That Shows That The Trade Was Sent:
global dateNumberTradeSent
dateNumberTradeSent = int(datetime.datetime.now().strftime("%d"))

Related

Discord API: Providing messages from incorrect time period

Im trying to get all messages from a channel from 2 days ago till now. instead, it's giving me the messages from the inception of the channel. Don't know what I'm doing wrong here...
here is the code
import requests
import json
import datetime
def retrieve_messages(channelid, after=None, before=None):
headers = {
'authorization': "<TOKEN>"
}
# Build the query string, including the after and before parameters
# if they were specified
query_string = '?'
if after:
query_string += f'&after={after}'
if before:
query_string += f'&before={before}'
# Make the API call, including the query string
r = requests.get(
f'https://discord.com/api/v9/channels/{channelid}/messages{query_string}', headers=headers)
jsonn = json.loads(r.text)
for value in jsonn:
print(value['content'], '\n')
# Retrieve messages from the past two days
two_days_ago = datetime.datetime.utcnow() - datetime.timedelta(days=2)
after = int(two_days_ago.timestamp())
retrieve_messages("<CHANNEL_ID>", after=after)
I finally figured it out. It is in the documentation
# Define the DISCORD_EPOCH constant
DISCORD_EPOCH = 1420070400000
# Convert the x_days_ago variable to a timestamp in milliseconds
timestamp_ms = int(x_days_ago.timestamp() * 1000)
# Subtract the DISCORD_EPOCH constant from the timestamp to get the correct format for the after parameter
after = (timestamp_ms - DISCORD_EPOCH) << 22
The discord.py API uses timestamps in milliseconds, not seconds. So:
# ...
if after:
query_string += f'&after={after * 1000}' # Multiply by 1000 (convert to ms)
if before:
query_string += f'&before={before}'

Python API script

I am making a python script using API of a free test automation website called TestProject.
Link to their API: https://api.testproject.io/docs/v2/
Basically what i want to do is grab pdf of reports of all tests and save them somewhere.
But to make the GET request to do that i first need projectID and jobID which i already wrote functions getting them and saving them in the array.
But now i have a problem where its looping through both lists and not using correct projectID and jobID and its throwing errors because it does not exist.
So what i need is something to check if jobID is in projectID so that way i can make a GET request to get all the executionID's to get the PDF of the report.
I am kinda new to programming so i would love any help i can get. If anyone has any better solutions please feel free to let me know.
My script:
import requests
import json
import csv
from datetime import datetime
from jsonpath_ng import jsonpath, parse
API_key = 'api_key'
headers = {'Authorization':'{}'.format(API_key)}
list_projectId = []
list_jobId = []
list_executionId = []
ParseData_projectId = parse('$..id')
ParseData_jobId = parse('$..id')
ParseData_executionId = parse('$..id')
def parsing (response,ParseData,list_data):
# parses data and appends it to the list
Data = json.loads(response)
Parsaj = ParseData
Podatki = Parsaj.find(Data)
for i in range(0, len(Podatki)):
vrednost = Podatki[i].value
list_data.append(vrednost)
def projectId():
# gets all projectId's and saves them in list_projectId
url = 'https://api.testproject.io/v2/projects?_start=0'
response = requests.get(url,headers=headers)
response_json = response.json()
converted = json.dumps(response_json)
parsing(converted,ParseData_projectId,list_projectId)
def jobId():
# gets all jobId's and saves them in list_jobId
for i in range(0, len(list_projectId)):
id = list_projectId[i]
url = 'https://api.testproject.io/v2/projects/{}'.format(id) + '/jobs?onlyScheduled=false&_start=0'
response = requests.get(url,headers=headers)
response_json = response.json()
converted = json.dumps(response_json)
parsing(converted,ParseData_jobId,list_jobId)
def executionId():
# Their API link:
# https://api.testproject.io/v2/projects/{projectId}/jobs/{jobId}/reports?_start=0
# the for loop below does not work here is where i need the help:
for i in range(0, len(list_projectId)):
project_id = list_projectId[i]
job_id = list_jobId[i]
url = 'https://api.testproject.io/v2/projects/{}'.format(project_id) + '/jobs/{}'.format(job_id) + '/reports?_start=0'
response = requests.get(url,headers=headers)
response_json = response.json()
converted = json.dumps(response_json)
parsing(converted,ParseData_executionId,list_executionId)
projectId()
print("----------LIST PROJECT ID: ----------")
print(list_projectId)
print("")
jobId()
print("----------LIST JOB ID: ----------")
print(list_jobId)
executionId()
print("----------LIST EXECUTION ID: ----------")
print(list_executionId)
you have to use 'in' operator to check the value exist in the list data structure.

Python Navigation Timeout Exceeded: 8000 ms exceeded

I keep getting this error on multiple scripts, I'm doing a lot of scraping, I have a loop that scrapes through hundreds of pages, and at some point the scripts just stops due to this error.
Here's an example of a script
Example 2:
def scrape(urls):
for url in urls:
session = HTMLSession()
resp = session.get(url)
resp.html.render()
try:
phone = resp.html.find('span.phone')[0].text
except IndexError:
phone = None
biz_name = resp.html.find('h1')[0].text
try:
biz_desc = resp.html.find('p.biz-description-text')[0].text
except IndexError:
biz_desc = None
biz_location = resp.html.find('span.title-address-text')[0].text
city = biz_location.split(',')[-1]
print(
f'phone is: {phone}\nthe business name is: {biz_name}\nthe description is: {biz_desc}\nthe city is: {city}')
import_data(biz_name, phone, biz_desc, city)
def import_data(name, phone, desc, city):
global keyword
wp_title_box = driver.find_element_by_xpath('//*[#id="title"]')
wp_title_box.send_keys(name)
time.sleep(1)
wp_desc_box = driver.find_element_by_xpath('//*[#id="content_ifr"]')
wp_desc_box.send_keys(desc)
time.sleep(1)
new_field_button = driver.find_element_by_xpath('//*[#id="newmeta-submit"]')
select_box = Select(driver.find_element_by_xpath('//*[#id="metakeyselect"]'))
select_box.select_by_value("ad_city")
wp_city_fill = driver.find_element_by_xpath('//*[#id="metavalue"]')
wp_city_fill.send_keys(city)
new_field_button.click()
time.sleep(2)
select_box.select_by_value("ad_phone")
wp_city_fill = driver.find_element_by_xpath('//*[#id="metavalue"]')
wp_city_fill.send_keys(phone)
new_field_button.click()
time.sleep(2)
select_box.select_by_value("ad_promote")
wp_city_fill = driver.find_element_by_xpath('//*[#id="metavalue"]')
wp_city_fill.send_keys('1')
new_field_button.click()
time.sleep(2)
save_btn = driver.find_element_by_xpath('//*[#id="save-post"]')
driver.execute_script("window.scrollTo(0,0);")
time.sleep(1)
save_btn.click()
time.sleep(2)
driver.find_element_by_xpath('//*[#id="menu-posts"]/ul/li[3]/a').click()
time.sleep(2)
I've added example 2, as example 1 was solved by a loop provided below.
In the second example the script should end since I'm using a for loop, once it has finished going through all of the urls and importing them, it should be done, am I missing something?
Your program never terminates. Number calls scrape, which calls number, which calls scrape, which calls number etc. If you are going to use recursion you need to have a terminating or base case.
One suggestion is using a counter to track the depth of your recursion and then increment the counter at each step until it reaches the specified depth.
I do think for what you are doing you do not need recursion at all which is expensive due to the overhead of function calls. A simple loop would be fine:
import random
import urllib3
from requests_html import HTMLSession
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def scrape(rand_num):
session = HTMLSession()
resp = session.get("https://www.example.com/prize/?d=" + '92' + str(rand_num))
resp.html.render()
print(f'trying coupon code 92{rand_num}')
prize = resp.html.find(containing="You've won a prize")
print(prize)
if prize:
print("https://www.example.com/prize/?d=" + '92' + str(rand_num))
def number():
for i in range(99999999):
x = random.randint(00000000, 99999999)
scrape(x)
number()

Python: Nested while loop inside a for loop breaking unexpectedly without any error

I have a while loop nested within a for loop that's running over a json array collected from firestore, which collects stock symbols to pass to another api to gather minute by minute trading data to put back into the firestore db.
While I'm running the loop, it'll stop unexpectedly without any error around the fourth or sixth (never more) time through the 389 entry while loop.
Any idea why this is? Is it something with my code? I noticed if I changed the limit in the while loop from 389 down to 100, it worked through all the companies within the json array. But it won't get through many than four companies down the list if it's the full 389 entries.
Anyway, thanks for the help!
import requests
import json
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime
cred = credentials.Certificate("./serviceAccountKey.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
doc_ref1 = db.collection(u'Quiver').stream()
for doc in doc_ref1:
symbol = doc.id
api_url = "https://api.iextrading.com/1.0/stock/{}/chart/1d".format(symbol)
query_url = api_url
r = requests.get(query_url)
if r.status_code != 200:
print("Error:", r.status_code)
continue
if r.status_code == 404:
print("Error:", r.status_code, symbol)
continue
json_stock = r.json()
b = 0
while b <= 100:
try:
date = json_stock[b]['date']
minute = json_stock[b]['minute']
label = json_stock[b]['label']
high = json_stock[b]['high']
low = json_stock[b]['low']
average = json_stock[b]['average']
volume = json_stock[b]['volume']
notional = json_stock[b]['notional']
numberOfTrades = json_stock[b]['numberOfTrades']
marketHigh = json_stock[b]['marketHigh']
marketLow = json_stock[b]['marketLow']
marketAverage = json_stock[b]['marketAverage']
marketVolume = json_stock[b]['marketVolume']
marketNotional = json_stock[b]['marketNotional']
marketNumberOfTrades = json_stock[b]['marketNumberOfTrades']
open = json_stock[b]['open']
close = json_stock[b]['close']
marketOpen = json_stock[b]['marketOpen']
marketClose = json_stock[b]['marketClose']
changeOverTime = json_stock[b]['changeOverTime']
marketChangeOverTime = json_stock[b]['marketChangeOverTime']
doc_ref = db.collection(u'dailies').document(u'{}-{}'.format(minute, symbol))
doc_ref.set({
u'date':u'{}'.format(date),
u'minute':u'{}'.format(minute),
u'label':u'{}'.format(label),
u'high':u'{}'.format(high),
u'average':u'{}'.format(average),
u'notional':u'{}'.format(notional),
u'number of trades':u'{}'.format(numberOfTrades),
u'market high':u'{}'.format(marketHigh),
u'market low':u'{}'.format(marketLow),
u'market average':u'{}'.format(marketAverage),
u'market volume':u'{}'.format(marketVolume),
u'market notional':u'{}'.format(marketNotional),
u'market number of trades':u'{}'.format(marketNumberOfTrades),
u'open':u'{}'.format(open),
u'close':u'{}'.format(close),
u'market open':u'{}'.format(marketOpen),
u'market close':u'{}'.format(marketClose),
u'change over time':u'{}'.format(changeOverTime),
u'market change over time':u'{}'.format(marketChangeOverTime)
})
print("{} {}: {}".format(symbol, minute, b))
b += 1
except IndexError:
print("Index Error")
break
you can use:
except Exception as errmsg:
print(errmsg)
and provide more information

AWS Lambda - How do I convert my code to work in AWS?

I'm struggling to get a Lambda function working. I have a python script to access twitter API, pull information, and export that information into an excel sheet. I'm trying to transfer python script over to AWS/Lambda, and I'm having a lot of trouble.
What I've done so far: Created AWS account, setup S3 to have a bucket, and poked around trying to get things to work.
I think the main area I'm struggling is how to go from a python script that I'm executing via local CLI and transforming that code into lambda-capable code. I'm not sure I understand how the lambda_handler function works, what the event or context arguments actually mean (despite watching a half dozen different tutorial videos), or how to integrate my existing functions into Lambda in the context of the lambda_handler, and I'm just very confused and hoping someone might be able to help me get some clarity!
Code that I'm using to pull twitter data (just a sample):
import time
import datetime
import keys
import pandas as pd
from twython import Twython, TwythonError
import pymysql
def lambda_handler(event, context):
def oauth_authenticate():
twitter_oauth = Twython(keys.APP_KEY, keys.APP_SECRET, oauth_version=2)
ACCESS_TOKEN = twitter_oauth.obtain_access_token()
twitter = Twython(keys.APP_KEY, access_token = ACCESS_TOKEN)
return twitter
def get_username():
"""
Prompts for the screen name of targetted account
"""
username = input("Enter the Twitter screenname you'd like information on. Do not include '#':")
return username
def get_user_followers(username):
"""
Returns data on all accounts following the targetted user.
WARNING: The number of followers can be huge, and the data isn't very valuable
"""
#username = get_username()
#import pdb; pdb.set_trace()
twitter = oauth_authenticate()
datestamp = str(datetime.datetime.now().strftime("%Y-%m-%d"))
target = twitter.lookup_user(screen_name = username)
for y in target:
target_id = y['id_str']
next_cursor = -1
index = 0
followersdata = {}
while next_cursor:
try:
get_followers = twitter.get_followers_list(screen_name = username,
count = 200,
cursor = next_cursor)
for x in get_followers['users']:
followersdata[index] = {}
followersdata[index]['screen_name'] = x['screen_name']
followersdata[index]['id_str'] = x['id_str']
followersdata[index]['name'] = x['name']
followersdata[index]['description'] = x['description']
followersdata[index]['date_checked'] = datestamp
followersdata[index]['targeted_account_id'] = target_id
index = index + 1
next_cursor = get_followers["next_cursor"]
except TwythonError as e:
print(e)
remainder = (float(twitter.get_lastfunction_header(header = 'x-rate-limit-reset')) \
- time.time())+1
print("Rate limit exceeded. Waiting for:", remainder/60, "minutes")
print("Current Time is:", time.strftime("%I:%M:%S"))
del twitter
time.sleep(remainder)
twitter = oauth_authenticate()
continue
followersDF = pd.DataFrame.from_dict(followersdata, orient = "index")
followersDF.to_excel("%s-%s-follower list.xlsx" % (username, datestamp),
index = False, encoding = 'utf-8')

Categories