I am new to the trading_ig library.
I have connected to my live IG account.
from trading_ig import IGService
from trading_ig_config import config
ig_service = IGService(config.username, config.password, config.api_key, config.acc_type)
ig_service.create_session()
#account_info = ig_service.switch_account(config.acc_number, False) # not necessary
#print(account_info)
open_positions = ig_service.fetch_open_positions()
print("open_positions:\n%s" % open_positions)
print("")
I have been able to download the latest 100 days of an index:
epic = 'IX.D.SPTRD.DAILY.IP'
resolution = 'D'
num_points = 100
response = ig_service.fetch_historical_prices_by_epic_and_num_points(epic, resolution, num_points)
df_ask = response['prices']['ask']
print("ask prices:\n%s" % df_ask)
Now, based on an algorithm that I have created, I want to open and close positions automatically by letting the Python code run continuously.
Do I need to use Lightstreamer?
How do I open and close a position in Python?
Again, I am new to this and the IG documentation didn't provide much information.
Thanks for your help.
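You don't need Lightstreamer just to place orders; the streaming API is for live price and trade updates, while dealing goes through the same REST session you already opened. IGService exposes create_open_position() and close_open_position() for this. Below is a minimal sketch; the parameter names and values are assumptions from my reading of the library, so verify them against the trading_ig docs and a demo account before trading live:
# Sketch only: parameter names/values are assumptions, verify before live use
deal = ig_service.create_open_position(
    currency_code='GBP',
    direction='BUY',
    epic='IX.D.SPTRD.DAILY.IP',   # same epic as above
    expiry='DFB',                 # daily funded bet; check the market's expiry
    force_open=False,
    guaranteed_stop=False,
    level=None,
    limit_distance=None,
    limit_level=None,
    order_type='MARKET',
    quote_id=None,
    size=1,
    stop_distance=None,
    stop_level=None,
    trailing_stop=False,
    trailing_stop_increment=None,
)
print(deal['dealStatus'])         # the call returns the deal confirmation

# Close by dealing the opposite direction against the deal id
ig_service.close_open_position(
    deal_id=deal['dealId'],
    direction='SELL',
    epic=None,                    # identify the position by deal_id instead
    expiry=None,
    level=None,
    order_type='MARKET',
    quote_id=None,
    size=1,
)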
I'm creating an application that downloads PDFs from a website and saves them to disk. I understand the Requests module is capable of this, but not of handling the logic behind the download (file size, progress, time remaining, etc.).
I've created the program using Selenium so far and would like to eventually incorporate it into a Tkinter GUI app.
What would be the best way to handle the downloading, tracking and eventually creating a progress bar?
This is my code so far:
from selenium import webdriver
from time import sleep
import requests
import secrets

class manual_grabber():
    """ A class creating a manual downloader for the Roger Technology website """

    def __init__(self):
        """ Initialize attributes of manual grabber """
        self.driver = webdriver.Chrome('\\Users\\Joel\\Desktop\\Python\\manual_grabber\\chromedriver.exe')

    def login(self):
        """ Function controlling the login logic """
        self.driver.get('https://rogertechnology.it/en/b2b')
        sleep(1)
        # Locate elements and enter login details
        user_in = self.driver.find_element_by_xpath('/html/body/div[2]/form/input[6]')
        user_in.send_keys(secrets.username)
        pass_in = self.driver.find_element_by_xpath('/html/body/div[2]/form/input[7]')
        pass_in.send_keys(secrets.password)
        enter_button = self.driver.find_element_by_xpath('/html/body/div[2]/form/div/input')
        enter_button.click()
        # Click Self Service Area button
        self_service_button = self.driver.find_element_by_xpath('//*[@id="bs-example-navbar-collapse-1"]/ul/li[1]/a')
        self_service_button.click()

    def download_file(self):
        """Access the file tree, navigate to the PDFs and download"""
        # Wait for all elements to load
        sleep(3)
        # Find and switch to iFrame
        frame = self.driver.find_element_by_xpath('//*[@id="siteOutFrame"]/iframe')
        self.driver.switch_to.frame(frame)
        # Find and click tech manuals button
        tech_manuals_button = self.driver.find_element_by_xpath('//*[@id="fileTree_1"]/ul/li/ul/li[6]/a')
        tech_manuals_button.click()

bot = manual_grabber()
bot.login()
bot.download_file()
So in summary, I'd like this code to download PDFs from the website, store them in a specific directory (named after its parent folder in the jQuery File Tree) and keep track of the progress (file size, time remaining, etc.).
Here is the DOM:
I hope this is enough information. Any more required please let me know.
I would recommend using tqdm and the requests module for this.
Here is sample code that handles the download and updates a progress bar:
from tqdm import tqdm
import requests

url = "http://www.ovh.net/files/10Mb.dat"  # big file test
# Streaming, so we can iterate over the response.
response = requests.get(url, stream=True)
total_size_in_bytes = int(response.headers.get('content-length', 0))
block_size = 1024  # 1 Kibibyte
progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
with open('test.dat', 'wb') as file:
    for data in response.iter_content(block_size):
        progress_bar.update(len(data))  # change this to your widget in tkinter
        file.write(data)
progress_bar.close()
if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
    print("ERROR, something went wrong")
The total_size_in_bytes is your file size (taken from the Content-Length header), and the time remaining can be estimated from the download rate, i.e. how many bytes arrive per second versus how many bytes remain. Here is an alternative: How to measure download speed and progress using requests?
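If you want the speed and time remaining explicitly (e.g. for a Tkinter label), here is a minimal sketch of computing them by hand in the same streaming loop; the variable names are mine, not from the linked answer:
import time
import requests

url = "http://www.ovh.net/files/10Mb.dat"
response = requests.get(url, stream=True)
total = int(response.headers.get('content-length', 0))

downloaded = 0
start = time.time()
with open('test.dat', 'wb') as file:
    for chunk in response.iter_content(1024):
        file.write(chunk)
        downloaded += len(chunk)
        elapsed = time.time() - start
        speed = downloaded / elapsed if elapsed > 0 else 0  # bytes per second
        if total and speed > 0:
            eta = (total - downloaded) / speed  # seconds remaining
            print("%3.0f%% at %6.0f KiB/s, ~%4.0fs left"
                  % (100.0 * downloaded / total, speed / 1024, eta))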
After trying to break down code from GitHub and to find any YouTube videos that talk about this, I'm starting to give up, so I'm hoping one of you can help me. All I want to do is monitor the value at a memory address in a game. For example, let's say in Minecraft the health value lives at this memory address:
Address: 001F6498
Value: 20
How do I turn this value into a variable in Python?
Code Thought Process:
import pywin32
pid = 5601
address = 0x001F6498
value = ReadProcessMemory(pid, address)
print(value)
# In this example I would like it to print 20
You need to get a handle to the process first. Here is some code that does so, using OpenProcess(), FindWindow() and GetWindowThreadProcessId() to get the handle to the process. Also included is a little function that reads the correct number of bytes and unpacks them into the right type. The same method can be used to read pointers, using "i" to denote an integer type.
import ctypes
import struct
from ctypes import byref, windll

import win32gui
import win32process
from pymem import Pymem

PROCESS_ALL_ACCESS = 0x1F0FFF
ReadProcessMemory = windll.kernel32.ReadProcessMemory

def get_length(fmt):
    # Number of bytes for a struct format string, e.g. "i" -> 4
    return struct.calcsize(fmt)

def read_memory(handle, address, fmt):
    # Read the right number of bytes from the process and unpack them
    buffer = (ctypes.c_byte * get_length(fmt))()
    bytes_read = ctypes.c_ulonglong(0)
    ReadProcessMemory(handle, address, buffer, get_length(fmt), byref(bytes_read))
    return struct.unpack(fmt, buffer)[0]

hWnd = win32gui.FindWindow(0, "WINDOW NAME HERE")
pid = win32process.GetWindowThreadProcessId(hWnd)  # (thread id, process id)

handle = Pymem()
handle.open_process_from_id(pid[1])
procBaseAddress = handle.process_base

hProc = windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, 0, pid[1])
value = read_memory(hProc, ADDRESS_OF_VARIABLE_TO_READ, "i")
print(value)
Credits to a friend, puppetmaster, who taught me how to do this
I'm trying to build an API tool for creating 100+ campaigns at a time, but so far I keep running into timeout errors. I have a feeling it's because I'm not doing this as a batch/async request, but I can't seem to find straightforward instructions specifically for batch creating campaigns in Python. Any help would be GREATLY appreciated!
I have all the campaign details prepped and ready to go in a Google sheet, which my script then reads (using pygsheets) and attempts to create the campaigns. Here's what it looks like so far:
from facebookads.adobjects.campaign import Campaign
from facebookads.adobjects.adaccount import AdAccount
from facebookads.api import FacebookAdsApi
from facebookads.exceptions import FacebookRequestError
import time
import pygsheets

FacebookAdsApi.init(access_token=xxx)

gc = pygsheets.authorize(service_file='xxx/client_secret.json')
sheet = gc.open('Campaign Prep')
tab1 = sheet.worksheet_by_title('Input')
tab2 = sheet.worksheet_by_title('Output')

# gets range size, offsetting it by 1 to account for the range starting on row 2
row_range = len(tab1.get_values('A1', 'A', returnas='matrix', majdim='ROWS', include_empty=False)) + 1

# finds first empty row in the output sheet
start_row = len(tab2.get_values('A1', 'A', returnas='matrix', majdim='ROWS', include_empty=False))

def create_campaigns(row):
    campaign = Campaign(parent_id=row[6])
    campaign.update({
        Campaign.Field.name: row[7],
        Campaign.Field.objective: row[9],
        Campaign.Field.buying_type: row[10],
    })
    c = campaign.remote_create(params={'status': Campaign.Status.active})
    camp_name = c['name']
    camp_id = 'cg:' + c['id']
    return camp_name, camp_id

r = start_row
# there's a header so I have the range starting at 2
for x in range(2, int(row_range)):
    r += 1
    row = tab1.get_row(x)
    camp_name, camp_id = create_campaigns(row)
    # pastes the generated campaign ID, campaign name and account id back into the sheet
    tab2.update_cells('A' + str(r) + ':C' + str(r), [[camp_id, camp_name, row[6].rsplit('_', 1)[1]]])
I've tried putting this in a try loop, so that if it runs into a FacebookRequestError it does time.sleep(5) and then keeps trying, but I'm still running into timeout errors every 5-10 rows it loops through. When it doesn't time out it does work; I guess I just need to figure out a way to make this handle big batches of campaigns more efficiently.
Any thoughts? I'm new to the Facebook API and I'm still a relative newb at Python, but I find this stuff so much fun! If anyone has any advice for how this script could be better (as well as general Python advice), I'd love to hear it! :)
Can you post the actual error message?
It sounds like what you are describing is that you hit the rate limits after making a certain number of calls. If that is so, time.sleep(5) won't be enough. The rate score decays over time and is only reset after 5 minutes (see https://developers.facebook.com/docs/marketing-api/api-rate-limiting). In that case I would suggest sleeping between each call instead. However, a better option would be to upgrade your API access level. If you hit the rate limits this fast I assume you are on the Developer level; try upgrading first to Basic and then Standard and you should not have these problems: https://developers.facebook.com/docs/marketing-api/access
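For example, here is a minimal sketch of spacing the calls out and backing off on errors, reusing the create_campaigns() function and the FacebookRequestError import from your script:
import time
from facebookads.exceptions import FacebookRequestError

def create_with_backoff(row, max_retries=5):
    for attempt in range(max_retries):
        try:
            return create_campaigns(row)  # the function from your script
        except FacebookRequestError:
            # The rate score decays over time, so wait longer on each retry
            time.sleep(30 * (attempt + 1))
    raise RuntimeError('still rate limited after %d retries' % max_retries)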
Also, as you mention, utilizing Facebook's batch request API could be a good idea. https://developers.facebook.com/docs/marketing-api/asyncrequests/v2.11
Here is a thread with examples of the Batch API working with the Python SDK: https://github.com/facebook/facebook-python-ads-sdk/issues/116
I paste the code snippet below (copied from the last link that @reaktard posted), credit to GitHub user @williardx. It helped me a lot in my development.
# ----------------------------------------------------------------------------
# Helper functions
def generate_batches(iterable, batch_size_limit):
    # This function can be found in examples/batch_utils.py
    batch = []
    for item in iterable:
        if len(batch) == batch_size_limit:
            yield batch
            batch = []
        batch.append(item)
    if len(batch):
        yield batch

def success_callback(response):
    batch_body_responses.append(response.body())

def error_callback(response):
    # Error handling here
    pass

# ----------------------------------------------------------------------------
batches = []
batch_body_responses = []
api = FacebookAdsApi.init(your_app_id, your_app_secret, your_access_token)

for ad_set_list in generate_batches(ad_sets, batch_limit):
    next_batch = api.new_batch()
    requests = [ad_set.get_insights(pending=True) for ad_set in ad_set_list]
    for req in requests:
        next_batch.add_request(req, success_callback, error_callback)
    batches.append(next_batch)

for batch_request in batches:
    batch_request.execute()
    time.sleep(5)

print(batch_body_responses)
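Note that the snippet batches insights reads; for the original question you would queue the campaign creations instead. A rough sketch below; I believe remote_create() accepts a batch keyword in this SDK, but treat that, and the reuse of row_range/tab1 from the question, as assumptions to verify:
batch = FacebookAdsApi.get_default_api().new_batch()
for x in range(2, int(row_range)):
    row = tab1.get_row(x)
    campaign = Campaign(parent_id=row[6])
    campaign.update({Campaign.Field.name: row[7]})
    # batch= queues the request instead of sending it immediately (assumption)
    campaign.remote_create(batch=batch, params={'status': Campaign.Status.active})
batch.execute()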
I am new to Python and managed to write a little program (using Python 3) to retrieve information from a website. I have a few problems:
I do not know how to tell Python to wait at each 80th step, i.e. when i = 80, 160, 240, etc.
I do not know how to tell Python to retrieve from the website how many steps exist in total (as this varies from page to page); see the image below. The maximum of 260 is "hard-coded" in this example. Can I tell Python to retrieve the 260 by itself (or any other number if this changes on another page)?
How can I tell Python to check on which page the script starts, so that it can adjust i to that page's number? Normally I presume to start at page 0 (i = 0), but if I were to start at page 30, my script should be able to set i = 30, or if I start at 200 it should set i = 200, etc. before it enters the while loop.
Is it clear what I am struggling with?
This is the pseudo code:
import time
from selenium import webdriver

url = input('Please, enter url: ')
driver = webdriver.Firefox()
driver.get(url)

i = 0
while i > 260:  # how to determine (book 1 = 260 / book 2 = 500)?
    # do something
    if i == 80:  # each 80th page?
        # pause
    else:
        # do something else
    i = i + 1
else:
    quit()
1) sleep
import time
....
if i % 80 == 0:  # each 80th page?
    # Wait for 5 seconds
    time.sleep(5)
2) element selectors
html = driver.find_element_by_css_selector('afterInput').get_attribute('innerHTML')
3) arguments
import sys
....
currentPage = sys.argv[2]
or extract it from the source (see 2)
First, if you want to know whether your i is a multiple of 80, you can use the modulo operator and check whether the result equals 0, for instance:
if i % 80 == 0:
    time.sleep(1)  # One second
Second, you need to query the html you receive from the server, for instance:
from selenium import webdriver
url = input('Please, enter url: ')
driver = webdriver.Firefox()
driver.get(url)
total_pages = driver.find_element_by_css_selector('afterInput').get_attribute('innerHTML').split()[1] # Take only the number
after your edit: All you have to do is assign i the value you want, by defining a variable in your script, parsing it from the command-line arguments, or scraping it from the website. This depends on your implementation and needs.
Other notes
I know you're on your beginning steps, but if you want to improve your code and make it a bit more Pythonic, I would make the following changes:
Using while and i = i + 1 is not a common pattern in Python; instead use for i in range(total_pages) - of course you need to know the number of pages (from your second question). See the sketch after these notes.
There is no need to call quit(); your script will end anyway at the end of the file.
I think you meant while i < 260.
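Putting the pieces together, here is a minimal sketch; the 'afterInput' selector is carried over from point 2 above and is an assumption about your page's markup:
import sys
import time
from selenium import webdriver

url = input('Please, enter url: ')
# Start page from the command line (question 3); default to page 0
start_page = int(sys.argv[1]) if len(sys.argv) > 1 else 0

driver = webdriver.Firefox()
driver.get(url)

# Scrape the total number of pages instead of hard-coding 260 (question 2)
total_pages = int(driver.find_element_by_css_selector('afterInput')
                  .get_attribute('innerHTML').split()[1])

for i in range(start_page, total_pages):
    # do something with page i here
    if i > 0 and i % 80 == 0:  # pause at every 80th page (question 1)
        time.sleep(5)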
I have been cobbling together a script from multiple sources to poll my Raspberry Pi's internal SoC temperature. I want to then stream that data to my Plotly account.
I have a semi-working script, but once the chip temp is read, it streams that same temperature indefinitely. As a noob to Python, I cannot figure out how to take the temperature on a user-set interval and continuously update it with a fresh value. My code is below:
#!/usr/bin/env python

import plotly.plotly as py          # plotly library
from plotly.graph_objs import *     # all plotly graph objects
import json                         # used to parse config.json
import time                         # timer functions
import datetime
import os                           # used to acquire internal SoC temperature
import sys

# Initialize some variables with your credentials
with open('./config.json') as config_file:
    plotly_user_config = json.load(config_file)

username = plotly_user_config['plotly_username']
api_key = plotly_user_config['plotly_api_key']
stream_token = plotly_user_config['plotly_streaming_tokens'][0]

# Initialize a Plotly Object
py.sign_in(username, api_key)

# Initialize your graph (not streaming yet)
data = [Scatter(
    x=[], y=[],
    mode='lines+markers',
    stream={'token': stream_token, 'maxpoints': 1000},
    name='UCBPD')
]

layout = Layout(
    title='Raspberry Pi Temperature',
    xaxis={'autorange': True, 'title': 'Time of Day'},
    yaxis={'autorange': True, 'title': 'Degrees (Celsius)'}
)

your_graph_url = py.plot(Figure(data=data, layout=layout), filename='Raspberry Pi Temp', auto_open=False)

# Acquire internal SoC temperature
cmd = '/opt/vc/bin/vcgencmd measure_temp'
line = os.popen(cmd).readline().strip()
if "error" in line:
    print "Error ... is your firmware up-to-date? Run rpi-update"
else:
    # line now contains something like: temp=41.2'C
    # to get the temperature, split on =, and then on '
    temp = line.split('=')[1].split("'")[0]
    # Initialize the Plotly Streaming Object
    stream = py.Stream(stream_token)
    stream.open()
    # Start looping and streaming!
    while True:
        stream.write({'x': datetime.datetime.now(), 'y': temp})
        time.sleep(1)  # delay between stream posts
Whatever code sets the value of the temp (temperature) variable needs to be inside the while loop, or else there is no way the temperature can change.
If you continue to have trouble, simplify the problem by removing the config file and the graphing, and just print raw readings to the console.
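Here is a minimal sketch of that fix, moving the question's own vcgencmd read inside the loop (same names as the question's script):
# Re-read the SoC temperature on every iteration so each point is fresh
while True:
    line = os.popen('/opt/vc/bin/vcgencmd measure_temp').readline().strip()
    if "error" not in line:
        # line looks like: temp=41.2'C
        temp = float(line.split('=')[1].split("'")[0])
        stream.write({'x': datetime.datetime.now(), 'y': temp})
    time.sleep(1)  # the user-set interval between readings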