I am trying to pull data from Bloomberg using the Python API. The API package comes with example code, and the programs that only require localhost work perfectly. However, the programs that use other authorization methods always get stuck with this error:
Connecting to port 8194 on localhost
TokenGenerationFailure = {
reason = {
source = "apitkns (apiauth) on ebbdbp-ob-053"
category = "NO_AUTH"
errorCode = 12
description = "User not in emrs userid=NA\mds firm=22691"
subcategory = "INVALID_USER"
}
}
Failed to get token
No authorization
I saw one other person with a similar problem, but instead of solving it he chose to just use localhost. I can't always use localhost, because I will have to assist and troubleshoot for other users, so I need a hint on how to overcome this error.
My question is: how can I set the userid to something other than OS_LOGON, which automatically uses the login credentials of my account, so that I can use other users' names when needed? I tried replacing OS_LOGON with the user name, but it didn't work.
The full program I am trying to run is:
"""SnapshotRequestTemplateExample.py"""
from __future__ import print_function
from __future__ import absolute_import
import datetime
from optparse import OptionParser, OptionValueError
import blpapi
TOKEN_SUCCESS = blpapi.Name("TokenGenerationSuccess")
TOKEN_FAILURE = blpapi.Name("TokenGenerationFailure")
AUTHORIZATION_SUCCESS = blpapi.Name("AuthorizationSuccess")
TOKEN = blpapi.Name("token")
def authOptionCallback(_option, _opt, value, parser):
vals = value.split('=', 1)
if value == "user":
parser.values.auth = "AuthenticationType=OS_LOGON"
elif value == "none":
parser.values.auth = None
elif vals[0] == "app" and len(vals) == 2:
parser.values.auth = "AuthenticationMode=APPLICATION_ONLY;"\
"ApplicationAuthenticationType=APPNAME_AND_KEY;"\
"ApplicationName=" + vals[1]
elif vals[0] == "userapp" and len(vals) == 2:
parser.values.auth = "AuthenticationMode=USER_AND_APPLICATION;"\
"AuthenticationType=OS_LOGON;"\
"ApplicationAuthenticationType=APPNAME_AND_KEY;"\
"ApplicationName=" + vals[1]
elif vals[0] == "dir" and len(vals) == 2:
parser.values.auth = "AuthenticationType=DIRECTORY_SERVICE;"\
"DirSvcPropertyName=" + vals[1]
else:
raise OptionValueError("Invalid auth option '%s'" % value)
def parseCmdLine():
"""parse cli arguments"""
parser = OptionParser(description="Retrieve realtime data.")
parser.add_option("-a",
"--ip",
dest="hosts",
help="server name or IP (default: localhost)",
metavar="ipAddress",
action="append",
default=[])
parser.add_option("-p",
dest="port",
type="int",
help="server port (default: %default)",
metavar="tcpPort",
default=8194)
parser.add_option("--auth",
dest="auth",
help="authentication option: "
"user|none|app=<app>|userapp=<app>|dir=<property>"
" (default: %default)",
metavar="option",
action="callback",
callback=authOptionCallback,
type="string",
default="user")
(opts, _) = parser.parse_args()
if not opts.hosts:
opts.hosts = ["localhost"]
if not opts.topics:
opts.topics = ["/ticker/IBM US Equity"]
return opts
def authorize(authService, identity, session, cid):
"""authorize the session for identity via authService"""
tokenEventQueue = blpapi.EventQueue()
session.generateToken(eventQueue=tokenEventQueue)
# Process related response
ev = tokenEventQueue.nextEvent()
token = None
if ev.eventType() == blpapi.Event.TOKEN_STATUS or \
ev.eventType() == blpapi.Event.REQUEST_STATUS:
for msg in ev:
print(msg)
if msg.messageType() == TOKEN_SUCCESS:
token = msg.getElementAsString(TOKEN)
elif msg.messageType() == TOKEN_FAILURE:
break
if not token:
print("Failed to get token")
return False
# Create and fill the authorization request
authRequest = authService.createAuthorizationRequest()
authRequest.set(TOKEN, token)
# Send authorization request to "fill" the Identity
session.sendAuthorizationRequest(authRequest, identity, cid)
# Process related responses
startTime = datetime.datetime.today()
WAIT_TIME_SECONDS = 10
while True:
event = session.nextEvent(WAIT_TIME_SECONDS * 1000)
if event.eventType() == blpapi.Event.RESPONSE or \
event.eventType() == blpapi.Event.REQUEST_STATUS or \
event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
for msg in event:
print(msg)
if msg.messageType() == AUTHORIZATION_SUCCESS:
return True
print("Authorization failed")
return False
endTime = datetime.datetime.today()
if endTime - startTime > datetime.timedelta(seconds=WAIT_TIME_SECONDS):
return False
def main():
"""main entry point"""
global options
options = parseCmdLine()
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
for idx, host in enumerate(options.hosts):
sessionOptions.setServerAddress(host, options.port, idx)
sessionOptions.setAuthenticationOptions(options.auth)
sessionOptions.setAutoRestartOnDisconnection(True)
print("Connecting to port %d on %s" % (
options.port, ", ".join(options.hosts)))
session = blpapi.Session(sessionOptions)
if not session.start():
print("Failed to start session.")
return
subscriptionIdentity = None
if options.auth:
subscriptionIdentity = session.createIdentity()
isAuthorized = False
authServiceName = "//blp/apiauth"
if session.openService(authServiceName):
authService = session.getService(authServiceName)
isAuthorized = authorize(authService, subscriptionIdentity,
session, blpapi.CorrelationId("auth"))
if not isAuthorized:
print("No authorization")
return
else:
print("Not using authorization")
.
.
.
.
.
finally:
session.stop()
if __name__ == "__main__":
print("SnapshotRequestTemplateExample")
try:
main()
except KeyboardInterrupt:
print("Ctrl+C pressed. Stopping...")
This example is intended for Bloomberg's BPIPE product and as such includes the necessary authorization code. If you're connecting to the Desktop API (typically localhost:8194) you would want to pass an auth parameter of "none". Note that this example is for the mktdata snapshot functionality, which isn't supported by the Desktop API.
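For example, these would be typical invocations of the script above (the BPIPE host name is a placeholder, not a value from the question):

python SnapshotRequestTemplateExample.py --auth none
python SnapshotRequestTemplateExample.py -a bpipe-host.example.com --auth user

The first connects to the Desktop API on localhost without authorization; the second authenticates against a BPIPE host with OS_LOGON.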
You state you're trying to troubleshoot on behalf of other users, presumably traders using BPIPE under their credentials. In this case you would need to create an Identity object to represent that user.
This would be done thusly:
# Create and fill the authorization request
authRequest = authService.createAuthorizationRequest()
authRequest.set("authId", STRING_CONTAINING_USERS_EMRS_LOGON)
authRequest.set("ipAddress", STRING_OF_IP_ADDRESS_WHERE_USER_IS_LOGGED_INTO_TERMINAL)
# Send authorization request to "fill" the Identity
session.sendAuthorizationRequest(authRequest, identity, cid)
Please be aware of potential licensing compliance issues when using this approach, as misuse can have serious consequences. If in any doubt, approach your firm's market data team, who will be able to check with their Bloomberg contacts.
Edit:
As asked in the comments, it's useful to elaborate on the other possible parameters for the AuthorizationRequest.
"uuid" + "ipAddress"; this would be the default method of authenticating users for Server API. On BPIPE this would require Bloomberg to explicitly enable it for you. The UUID is the unique integer identifier assigned to each Bloomberg Anywhere user. You can look this up in the terminal by running IAM
"emrsId" + "ipAddress"; "emrsId" is a deprecated alias for "authId". This shouldn't be used anymore.
"authId" + "ipAddress"; "authId" is the String defined in EMRS (the BPIPE Entitlements Management and Reporting System) or SAPE (the Server API's equivalent of EMRS) that represents each user. This would typically be that user's OS login details (e.g. DOMAIN/USERID) or Active Directory property (e.g. mail -> blah#blah.blah)
"authId" + "ipAddress" + "application"; "application" is the application name defined on EMRS/SAPE. This will check to see whether the user defined in authId is enabled for the named application on EMRS. Using one of these user+app style Identity objects in requests should record usage against both the user and application in the EMRS usage reports.
"token"; this is the preferred approach. Using the session.generateToken functionality (which can be seen in the original question's code snippet) will result in an alphanumeric string. You'd pass this as the only parameter into the Authorization request. Note that the token generation system is virtualization-aware; if it detects it's running in Citrix or a remote desktop it will report the IP address of the display machine (or one hop towards where the user actually is).
So I am working on a coding project for my internship in DC. My project involves using Python and the Microsoft Graph API to build a program that checks my company's employee email addresses to see whether another authentication method has been added to an address. If another authentication method is detected for an email address, it could mean that a bad actor is trying to access information.
I have been referring to the video Getting Started With Microsoft Graph API For Python Development (Set Up & Authentication) by Jie Jenn. So far, I'm able to get a device code and link from the program, but I cannot obtain the authorization code. Aside from that, I am also getting a traceback error on line 31 of demo2.py, another two traceback errors on lines 12 and 100 of main.py, and TypeError: 'dict' object is not callable in demo2.py.
Here is my code.
Errors:
'''
Traceback (most recent call last):
  File "C:\Users\S.Soundararajan\Documents\PE Project for Azure\demo2.py", line 31, in <module>
    webbrowser.open(flow('verification_uri'))
TypeError: 'dict' object is not callable
'''
'''
Traceback (most recent call last):
  File "C:\Users\S.Soundararajan\Documents\PE Project for Azure\main.py", line 100, in <module>
    main()
  File "C:\Users\S.Soundararajan\Documents\PE Project for Azure\main.py", line 12, in main
    graph: Graph = Graph(azure_settings)
TypeError: Graph() takes no arguments
Python Graph Tutorial
'''
demo2.py:
#import account as account
import webbrowser
from xmlrpc.client import APPLICATION_ERROR
import requests
import msal
from msal import PublicClientApplication

CLIENT_ID = ''
CLIENT_SECRET = ''
authority_url = ''
base_url = 'https://graph.microsoft.com/v1.0/'
endpoint = base_url + 'me'
SCOPES = ['User.Read', 'Mail.Read', 'Mail.Send']

# Method 2. Login to acquire access_token
app = PublicClientApplication(
    CLIENT_ID,
    authority=authority_url
)

#accounts = app.get_accounts()
#if accounts:
#    app.acquire_token_silent(scopes=SCOPES, account=account[0])

flow = app.initiate_device_flow(scopes=SCOPES)
print(flow)
print(flow['message'])
#app_code = flow['message']
webbrowser.open(flow('verification_uri'))
result = app.acquire_token_by_device_flow(flow)
access_token_id = result['access_token']
headers = {'Authorization': 'Bearer' + access_token_id}
response = requests.get(endpoint, headers=headers)
print(response)
print(response.json())
main.py:
import configparser
from graph import Graph
from msal import PublicClientApplication

def main():
    print('Python Graph Tutorial\n')

    # Load settings
    config = configparser.ConfigParser()
    config.read(['config.cfg', 'config.dev.cfg'])
    azure_settings = config['azure']

    graph: Graph = Graph(azure_settings)

    greet_user(graph)

    choice = -1

    while choice != 0:
        print('Please choose one of the following options:')
        print('0. Exit')
        print('1. Display access token')
        print('2. List my inbox')
        print('3. Send mail')
        print('4. List users (requires app-only)')
        print('5. Make a Graph call')

        try:
            choice = int(input())
        except ValueError:
            choice = -1

        if choice == 0:
            print('Goodbye...')
        elif choice == 1:
            display_access_token(graph)
        elif choice == 2:
            list_inbox(graph)
        elif choice == 3:
            send_mail(graph)
        elif choice == 4:
            list_users(graph)
        elif choice == 5:
            make_graph_call(graph)
        else:
            print('Invalid choice!\n')

def greet_user(graph: Graph):
    user = graph.get_user()
    print('Hello,', user['displayName'])
    # For Work/school accounts, email is in mail property
    # Personal accounts, email is in userPrincipalName
    print('Email:', user['mail'] or user['userPrincipalName'], '\n')

def display_access_token(graph: Graph):
    token = graph.get_user_token()
    print('User token:', token, '\n')
    return 1

def list_users(graph: Graph):
    users_page = graph.get_users()

    # Output each user's details
    for user in users_page['value']:
        print('User:', user['displayName'])
        print('  ID:', user['id'])
        print('  Email:', user['mail'])

    # If @odata.nextLink is present
    more_available = '@odata.nextLink' in users_page
    print('\nMore users available?', more_available, '\n')

def list_inbox(graph: Graph):
    message_page = graph.get_inbox()

    # Output each message's details
    for message in message_page['value']:
        print('Message:', message['subject'])
        print('  From:', message['from']['emailAddress']['name'])
        print('  Status:', 'Read' if message['isRead'] else 'Unread')
        print('  Received:', message['receivedDateTime'])

    # If @odata.nextLink is present
    more_available = '@odata.nextLink' in message_page
    print('\nMore messages available?', more_available, '\n')

def send_mail(graph: Graph):
    # Send mail to the signed-in user
    # Get the user for their email address
    user = graph.get_user()
    user_email = user['mail'] or user['userPrincipalName']

    graph.send_mail('Testing Microsoft Graph', 'Hello world!', user_email)
    print('Mail sent.\n')

def make_graph_call(graph: Graph):
    graph.make_graph_call()

# Run main
main()
Issue 1:
In the code base, authority_url is never assigned a real value, so the device flow has no URL to open.
More importantly, dictionary values must always be accessed with square brackets. The code in the thread uses () to access an element of the flow dictionary, which calls the dict like a function and raises TypeError: 'dict' object is not callable.
Solution:
webbrowser.open(flow['verification_uri'])  # use [] to index the dict, not ()

authority_url = 'https://docs.python.org/'
webbrowser.open_new(authority_url)      # same window
webbrowser.open_new_tab(authority_url)  # will open in a new tab
Issue 2:
For the "TypeError: Graph() takes no arguments" error, refer to the official tutorial.
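That error means the Graph class in graph.py has no __init__ accepting the settings object that main.py passes in. A minimal sketch of what main.py expects (assuming the tutorial's config keys clientId and tenantId; the tutorial's real class does more than this):

# graph.py -- minimal sketch only; see the official tutorial for the full class.
from msal import PublicClientApplication

class Graph:
    def __init__(self, config):
        # Without an __init__ that accepts the settings, Graph(azure_settings)
        # raises "TypeError: Graph() takes no arguments".
        self.settings = config
        self.app = PublicClientApplication(
            config['clientId'],
            authority='https://login.microsoftonline.com/' + config['tenantId'])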
I am pretty new to coding and AWS Chalice. I tried writing code that gets messages from TradingView and executes orders depending on the signals.
I tested the code locally and everything worked fine, but when I test the REST API I get the following error:
{"message":"Missing Authentication Token"}
I set up my credentials via "aws configure" as explained here: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
I also created a config.txt file in my aws folder and checked my settings via "aws configure get", and they were fine.
The index function in the beginning worked too, so the problem should be within my code?
I changed some values and cut out some functions and the strategy part, but the code looks somewhat like this:
from chalice import Chalice
from datetime import datetime
from binance.client import Client
from binance.enums import *
import ccxt

exchange = ccxt.binance({
    'apiKey': 'KEY',
    'secret': 'SECRET',
    'enableRateLimit': True,
    'options': {
        'defaultType': 'future',
    },
})

def buy_order(quantity, symbol, order_type=ORDER_TYPE_MARKET, side=SIDE_BUY, recvWindow=5000):
    try:
        print("sending order")
        order = client.futures_create_order(symbol=symbol, type=order_type, side=side,
                                            quantity=quantity, recvWindow=recvWindow)
        print(order)
    except Exception as e:
        print("an exception occured - {}".format(e))
        return False
    return True

app = Chalice(app_name='tradingview-webhook-alert')

indicator1 = "x"
indicator2 = "y"
TRADE_SYMBOL = "Test123"
in_position = False

def diff_time(time1, time2):
    fmt = '%Y-%m-%dT%H:%M:%SZ'
    tstamp1 = datetime.strptime(time1, fmt)
    tstamp2 = datetime.strptime(time2, fmt)
    if tstamp1 > tstamp2:
        td = tstamp1 - tstamp2
    else:
        td = tstamp2 - tstamp1
    td_mins = int(round(td.total_seconds() / 60))
    return td_mins

@app.route('/test123', methods=['POST'])
def test123():
    global indicator1, indicator2
    request = app.current_request
    message = request.json_body
    indicator = message["indicator"]
    price = message["price"]
    value = message["value"]

    if indicator == "indicator1":
        indicator1 = value
    if indicator == "indicator2":
        indicator2 = value

    if in_position == False:
        if (indicator1 > 123) & (indicator2 < 321):
            balance = exchange.fetch_free_balance()
            usd = float(balance['USDT'])
            TRADE_QUANTITY = (usd / price) * 0.1
            order_succeeded = buy_order(TRADE_QUANTITY, TRADE_SYMBOL)
            if order_succeeded:
                in_position = True

    return {"test": "123"}
I tested it locally with Insomnia and tried the REST API link there and in my browser, both with the same error message. Is my testing method wrong, or is it the code? But even then, why isn't the REST API link working when I include the index function from the beginning again? If I try the index function from the beginning, I get {"message": "Internal server error"}.
This is probably a very, very basic question, but I couldn't find an answer online.
Any help would be appreciated!
I am not sure if this helps, because I don't fully understand your question, but:
You are using a POST request, which will not be executed by opening a URL.
Try something like @app.route('/test123', methods=['POST', 'GET']) so that just opening the URL executes a GET request.
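As a rough sketch (the route name comes from your code; the handler bodies are illustrative), branching on the method lets a browser check and the TradingView webhook share one route:

from chalice import Chalice

app = Chalice(app_name='tradingview-webhook-alert')

@app.route('/test123', methods=['POST', 'GET'])
def test123():
    request = app.current_request
    if request.method == 'GET':
        # Opening the URL in a browser lands here instead of failing.
        return {'status': 'endpoint is up'}
    # TradingView webhooks arrive as POST with a JSON body.
    return {'received': request.json_body}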
Some more information:
https://www.w3schools.com/tags/ref_httpmethods.asp
I can't work out from the Azure documentation for the Python SDK how to create a new node pool in an existing Kubernetes cluster.
It is easy to do from the command line:
az aks nodepool add --cluster-name CLUSTER-NAME \
    --resource-group RESOURCE-GROUP \
    --name NODE-NAME \
    --enable-cluster-autoscaler \
    --kubernetes-version 1.15.7 \
    --min-count 1 \
    --max-count 4 \
    --node-count 1 \
    --node-vm-size Standard_NC6s_v2
How can I implement the exact same command using the Python SDK?
Currently, I am able to connect to the client like this:
# pip install azure
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.containerservice import ContainerServiceClient
credentials = ServicePrincipalCredentials(CLIENT,KEY,tenant = TENANT_ID)
client = ContainerServiceClient(credentials, subscription_id)
credentialResults = client.managed_clusters.list_cluster_user_credentials(resource_group, aks_service_name)
# How can I continue from here to create or delete a new nodepool?
How can I continue from here to create or delete a new nodepool?
You can make use of the following code to scale an existing node pool:
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
            agent_profile.count = int(node_count)  # pylint: disable=no-member
            # null out the SP and AAD profile because otherwise validation complains
            instance.service_principal_profile = None
            instance.aad_profile = None
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
To add a node pool:
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      enable_node_public_ip=False,
                      node_vm_size=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      tags=None,
                      labels=None,
                      mode="User",
                      no_wait=False):
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))

    taints_array = []
    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')

    if node_vm_size is None:
        if os_type.lower() == "windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"

    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        enable_node_public_ip=enable_node_public_ip,
        node_taints=taints_array,
        mode=mode
    )

    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)

    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
Github reference
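For reference, here is a minimal sketch of calling the SDK directly, assuming the agent_pools operations group and AgentPool model that azure-mgmt-containerservice exposes for multi-node-pool API versions (exact import paths and method names vary by SDK release), mirroring the 'az aks nodepool add' call from the question:

# Minimal sketch, not verified against every SDK version.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.containerservice import ContainerServiceClient
from azure.mgmt.containerservice.models import AgentPool

credentials = ServicePrincipalCredentials(CLIENT, KEY, tenant=TENANT_ID)
client = ContainerServiceClient(credentials, subscription_id)

# agent_pools.create_or_update corresponds to 'az aks nodepool add'.
poller = client.agent_pools.create_or_update(
    RESOURCE_GROUP,
    CLUSTER_NAME,
    NODE_NAME,
    AgentPool(
        count=1,
        vm_size='Standard_NC6s_v2',
        orchestrator_version='1.15.7',
        enable_auto_scaling=True,
        min_count=1,
        max_count=4,
    ),
)
agent_pool = poller.result()  # block until provisioning completes

# Deleting works the same way:
# client.agent_pools.delete(RESOURCE_GROUP, CLUSTER_NAME, NODE_NAME).wait()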
Thanks to Sajeetharan, I found the implementation of 'az aks nodepool add' in the azure-cli code:
GitHub ref: azure cli:
(It is the same aks_agentpool_add function quoted in the answer above.)
I'm looking for a way to pull reports for multiple MCC accounts in one go using the AdWords API for Python. With the code below I'm able to pass one MCC account at a time via the googleads.yaml file.
Is there a way to pass multiple MCC accounts from a CSV or text file and pull reports for them?
YAML file
# AdWordsClient configurations
adwords:
  #############################################################################
  # Required Fields                                                           #
  #############################################################################
  developer_token: XXXXXX
  #############################################################################
  # Optional Fields                                                           #
  #############################################################################
  client_customer_id: XXXX
  user_agent: XXXX
  # partial_failure: True
  # validate_only: True
  #############################################################################
  # OAuth2 Configuration                                                      #
  # Below you may provide credentials for either the installed application or #
  # service account flows. Remove or comment the lines for the flow you're    #
  # not using.                                                                #
  #############################################################################
  # The following values configure the client for the installed application
  # flow.
  client_id: XXXXX
  client_secret: XXXX
  refresh_token: XXXXX
Code
import multiprocessing
import os
from Queue import Empty
import time

import googleads.adwords
import googleads.errors

# Timeout between retries in seconds.
BACKOFF_FACTOR = 5
# Maximum number of processes to spawn.
MAX_PROCESSES = multiprocessing.cpu_count()
# Maximum number of retries for 500 errors.
MAX_RETRIES = 5
# Maximum number of items to be sent in a single API response.
PAGE_SIZE = 100
# Directory to download the reports to.
REPORT_DOWNLOAD_DIRECTORY = '.'


def _DownloadReport(process_id, report_download_directory, customer_id,
                    report_definition):
  report_downloader = (googleads.adwords.AdWordsClient.LoadFromStorage(
      'googleads.yaml').GetReportDownloader())
  filepath = os.path.join(report_download_directory,
                          'adgroup_%d.csv' % customer_id)
  retry_count = 0

  while True:
    print ('[%d/%d] Loading report for customer ID "%s" into "%s"...'
           % (process_id, retry_count, customer_id, filepath))
    try:
      with open(filepath, 'wb') as handler:
        report_downloader.DownloadReport(
            report_definition, output=handler,
            client_customer_id=customer_id)
      return True, {'customerId': customer_id}
    except googleads.errors.AdWordsReportError as e:
      if e.code == 500 and retry_count < MAX_RETRIES:
        time.sleep(retry_count * BACKOFF_FACTOR)
      else:
        print ('Report failed for customer ID "%s" with code "%d" after "%d" '
               'retries.' % (customer_id, e.code, retry_count + 1))
        return (False, {'customerId': customer_id, 'code': e.code,
                        'message': e.message})


class ReportWorker(multiprocessing.Process):
  """A worker Process used to download reports for a set of customer IDs."""

  def __init__(self, report_download_directory, report_definition,
               input_queue, success_queue, failure_queue):
    """Initializes a ReportWorker.

    Args:
      report_download_directory: A string indicating the directory where you
        would like to download the reports.
      report_definition: A dict containing the report definition that you would
        like to run against all customer IDs in the input_queue.
      input_queue: A Queue instance containing all of the customer IDs that
        the report_definition will be run against.
      success_queue: A Queue instance that the details of successful report
        downloads will be saved to.
      failure_queue: A Queue instance that the details of failed report
        downloads will be saved to.
    """
    super(ReportWorker, self).__init__()
    self.report_download_directory = report_download_directory
    self.report_definition = report_definition
    self.input_queue = input_queue
    self.success_queue = success_queue
    self.failure_queue = failure_queue

  def run(self):
    while True:
      try:
        customer_id = self.input_queue.get(timeout=0.01)
      except Empty:
        break
      result = _DownloadReport(self.ident, self.report_download_directory,
                               customer_id, self.report_definition)
      (self.success_queue if result[0] else self.failure_queue).put(result[1])


def GetCustomerIDs(client):
  """Retrieves all CustomerIds in the account hierarchy.

  Note that your configuration file must specify a client_customer_id belonging
  to an AdWords manager account.

  Args:
    client: an AdWordsClient instance.

  Raises:
    Exception: if no CustomerIds could be found.

  Returns:
    A Queue instance containing all CustomerIds in the account hierarchy.
  """
  # For this example, we will use ManagedCustomerService to get all IDs in
  # hierarchy that do not belong to MCC accounts.
  managed_customer_service = client.GetService('ManagedCustomerService',
                                               version='v201809')

  offset = 0

  # Get the account hierarchy for this account.
  selector = {
      'fields': ['CustomerId'],
      'predicates': [{
          'field': 'CanManageClients',
          'operator': 'EQUALS',
          'values': [False]
      }],
      'paging': {
          'startIndex': str(offset),
          'numberResults': str(PAGE_SIZE)
      }
  }

  # Using Queue to balance load between processes.
  queue = multiprocessing.Queue()
  more_pages = True

  while more_pages:
    page = managed_customer_service.get(selector)
    if page and 'entries' in page and page['entries']:
      for entry in page['entries']:
        queue.put(entry['customerId'])
    else:
      raise Exception('Can\'t retrieve any customer ID.')
    offset += PAGE_SIZE
    selector['paging']['startIndex'] = str(offset)
    more_pages = offset < int(page['totalNumEntries'])

  return queue


def main(client, report_download_directory):
  # Determine list of customer IDs to retrieve report for.
  input_queue = GetCustomerIDs(client)
  reports_succeeded = multiprocessing.Queue()
  reports_failed = multiprocessing.Queue()

  # Create report definition.
  report_definition = {
      'reportName': 'Custom ADGROUP_PERFORMANCE_REPORT',
      'dateRangeType': 'LAST_7_DAYS',
      'reportType': 'ADGROUP_PERFORMANCE_REPORT',
      'downloadFormat': 'CSV',
      'selector': {
          'fields': ['CampaignId', 'AdGroupId', 'Impressions', 'Clicks',
                     'Cost'],
          # Predicates are optional.
          'predicates': {
              'field': 'AdGroupStatus',
              'operator': 'IN',
              'values': ['ENABLED', 'PAUSED']
          }
      },
  }

  queue_size = input_queue.qsize()
  num_processes = min(queue_size, MAX_PROCESSES)
  print 'Retrieving %d reports with %d processes:' % (queue_size, num_processes)

  # Start all the processes.
  processes = [ReportWorker(report_download_directory,
                            report_definition, input_queue, reports_succeeded,
                            reports_failed)
               for _ in range(num_processes)]
  for process in processes:
    process.start()

  for process in processes:
    process.join()

  print 'Finished downloading reports with the following results:'
  while True:
    try:
      success = reports_succeeded.get(timeout=0.01)
    except Empty:
      break
    print '\tReport for CustomerId "%d" succeeded.' % success['customerId']

  while True:
    try:
      failure = reports_failed.get(timeout=0.01)
    except Empty:
      break
    print ('\tReport for CustomerId "%d" failed with error code "%s" and '
           'message: %s.' % (failure['customerId'], failure['code'],
                             failure['message']))


if __name__ == '__main__':
  adwords_client = googleads.adwords.AdWordsClient.LoadFromStorage(
      'googleads.yaml')

  main(adwords_client, REPORT_DOWNLOAD_DIRECTORY)
How can I get the performance reports for multiple MCC accounts?
You need to create a separate googleads.adwords.AdWordsClient instance for each account, as one client can only work with one AdWords account (MCC or single account). To create an AdWordsClient instance, you can automate the flow without using a YAML file for configuration; use the code below (the rest of the code remains the same):
"""Initializes a AdManagerClient without using yaml-cached credentials.
While our LoadFromStorage method provides a useful shortcut to instantiate a
client if you regularly use just one set of credentials, production applications
may need to swap out users. This example shows you how to create an OAuth2
client and a AdManagerClient without relying on a yaml file.
"""
from googleads import ad_manager
from googleads import oauth2
# OAuth2 credential information. In a real application, you'd probably be
# pulling these values from a credential storage.
CLIENT_ID = 'INSERT_CLIENT_ID_HERE'
CLIENT_SECRET = 'INSERT_CLIENT_SECRET_HERE'
REFRESH_TOKEN = 'INSERT_REFRESH_TOKEN_HERE'
# Ad Manager API information.
APPLICATION_NAME = 'INSERT_APPLICATION_NAME_HERE'
# Client customer id
CLIENT_CUSTOMER_ID = 'INSERT_CLIENT_CUSTOMER_ID_HERE'
def main(client_id, client_secret, refresh_token, application_name):
oauth2_client = oauth2.GoogleRefreshTokenClient(
client_id, client_secret, refresh_token)
ad_manager_client = ad_manager.AdManagerClient(
oauth2_client, application_name,client_customer_id=CLIENT_CUSTOMER_ID)
networks = ad_manager_client.GetService('NetworkService').getAllNetworks()
for network in networks:
print ('Network with network code "%s" and display name "%s" was found.'
% (network['networkCode'], network['displayName']))
if __name__ == '__main__':
main(CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, APPLICATION_NAME)
Code reference
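To address the CSV part of the question directly, here is a minimal sketch (the file name, the one-ID-per-row layout, and the credential constants are assumptions) that builds one AdWordsClient per MCC ID and reuses the report logic from the question:

# Sketch: loop over MCC customer IDs from a CSV, one AdWordsClient each.
import csv

from googleads import adwords
from googleads import oauth2

oauth2_client = oauth2.GoogleRefreshTokenClient(
    CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN)

with open('mcc_accounts.csv') as csv_file:  # hypothetical file, one ID per row
  mcc_ids = [row[0] for row in csv.reader(csv_file)]

for mcc_id in mcc_ids:
  client = adwords.AdWordsClient(
      DEVELOPER_TOKEN, oauth2_client, USER_AGENT,
      client_customer_id=mcc_id)
  main(client, REPORT_DOWNLOAD_DIRECTORY)  # report logic from the question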
I am trying to run IbPy on my Linux server machine, using IB Gateway to connect my API code to IB.
I am placing a limit order; the problem is that IB Gateway terminates my client connection.
As soon as it places the order the connection is closed, so I am unable to get the order status.
(This same code works perfectly when I run it on a Windows machine.)
The code I am using to place the order:
def place_single_order(self, order_type, action, lmtprice, expiry_date, quantity, conn):
    conn = Connection.create(host='localhost', port=7496, clientId=1,
                             receiver=ib, sender=None, dispatcher=None)
    conn.connect()
    conn.register(self.error_handler, 'Error')
    conn.register(self.executed_order, message.execDetails)
    conn.register(self.validids, message.nextValidId)
    conn.register(self.my_order_status, message.orderStatus)

    newContract = Contract()
    newContract.m_symbol = 'ES'
    newContract.m_secType = 'FUT'
    newContract.m_exchange = 'GLOBEX'
    newContract.m_currency = 'USD'
    newContract.m_expiry = expiry_date

    order = Order()
    order.m_action = action
    order.m_totalQuantity = quantity
    order.m_transmit = True
    order.m_orderType = order_type
    if lmtprice != 0 and order_type == 'LMT':
        order.m_lmtPrice = lmtprice
    elif lmtprice != 0 and order_type == 'STP':
        order.m_auxPrice = lmtprice
    else:
        pass

    oid = self.new_orderID(conn)  # this is to get the new orderid from IB
    conn.placeOrder(oid, newContract, order)
I think you should just add
time.sleep(10)
after placing the order to see the order status. And of course you should register a handler for those response messages.
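A minimal sketch of both suggestions together (assuming IbPy's ib.opt wrappers used in the question; the printed fields follow IbPy's orderStatus message):

import time

from ib.opt import Connection, message

def my_order_status(msg):
    # Fires asynchronously when IB Gateway reports a state change for the order.
    print('orderId=%s status=%s filled=%s' % (msg.orderId, msg.status, msg.filled))

conn = Connection.create(host='localhost', port=7496, clientId=1)
conn.connect()
conn.register(my_order_status, message.orderStatus)

# ... build the contract and order, then conn.placeOrder() as in the question ...

time.sleep(10)  # keep the connection alive so orderStatus messages can arrive
conn.disconnect()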