I'm looking for a way to pull reports for multiple MCC accounts in one go using the AdWords API for Python. With the code below I can pass one MCC account at a time via the googleads.yaml file.
Is there a way to pass multiple MCC accounts from a CSV or text file and pull reports for all of them?
YAML file
# AdWordsClient configurations
adwords:
#############################################################################
# Required Fields #
#############################################################################
developer_token: XXXXXX
#############################################################################
# Optional Fields #
#############################################################################
client_customer_id: XXXX
user_agent: XXXX
# partial_failure: True
# validate_only: True
#############################################################################
# OAuth2 Configuration #
# Below you may provide credentials for either the installed application or #
# service account flows. Remove or comment the lines for the flow you're #
# not using. #
#############################################################################
# The following values configure the client for the installed application
# flow.
client_id: XXXXX
client_secret: XXXX
refresh_token: XXXXX
Code
import multiprocessing
import os
from Queue import Empty
import time
import googleads.adwords
import googleads.errors
# Timeout between retries in seconds.
BACKOFF_FACTOR = 5
# Maximum number of processes to spawn.
MAX_PROCESSES = multiprocessing.cpu_count()
# Maximum number of retries for 500 errors.
MAX_RETRIES = 5
# Maximum number of items to be sent in a single API response.
PAGE_SIZE = 100
# Directory to download the reports to.
REPORT_DOWNLOAD_DIRECTORY = '.'
def _DownloadReport(process_id, report_download_directory, customer_id,
report_definition):
report_downloader = (googleads.adwords.AdWordsClient.LoadFromStorage('googleads.yaml')
                     .GetReportDownloader())
filepath = os.path.join(report_download_directory,
'adgroup_%d.csv' % customer_id)
retry_count = 0
while True:
print ('[%d/%d] Loading report for customer ID "%s" into "%s"...'
% (process_id, retry_count, customer_id, filepath))
try:
with open(filepath, 'wb') as handler:
report_downloader.DownloadReport(
report_definition, output=handler,
client_customer_id=customer_id)
return True, {'customerId': customer_id}
except googleads.errors.AdWordsReportError as e:
if e.code == 500 and retry_count < MAX_RETRIES:
retry_count += 1
time.sleep(retry_count * BACKOFF_FACTOR)
else:
print ('Report failed for customer ID "%s" with code "%d" after "%d" '
'retries.' % (customer_id, e.code, retry_count + 1))
return (False, {'customerId': customer_id, 'code': e.code,
'message': e.message})
class ReportWorker(multiprocessing.Process):
"""A worker Process used to download reports for a set of customer IDs."""
def __init__(self, report_download_directory, report_definition,
input_queue, success_queue, failure_queue):
"""Initializes a ReportWorker.
Args:
report_download_directory: A string indicating the directory where you
would like to download the reports.
report_definition: A dict containing the report definition that you would
like to run against all customer IDs in the input_queue.
input_queue: A Queue instance containing all of the customer IDs that
the report_definition will be run against.
success_queue: A Queue instance that the details of successful report
downloads will be saved to.
failure_queue: A Queue instance that the details of failed report
downloads will be saved to.
"""
super(ReportWorker, self).__init__()
self.report_download_directory = report_download_directory
self.report_definition = report_definition
self.input_queue = input_queue
self.success_queue = success_queue
self.failure_queue = failure_queue
def run(self):
while True:
try:
customer_id = self.input_queue.get(timeout=0.01)
except Empty:
break
result = _DownloadReport(self.ident, self.report_download_directory,
customer_id, self.report_definition)
(self.success_queue if result[0] else self.failure_queue).put(result[1])
def GetCustomerIDs(client):
"""Retrieves all CustomerIds in the account hierarchy.
Note that your configuration file must specify a client_customer_id belonging
to an AdWords manager account.
Args:
client: an AdWordsClient instance.
Raises:
Exception: if no CustomerIds could be found.
Returns:
A Queue instance containing all CustomerIds in the account hierarchy.
"""
# For this example, we will use ManagedCustomerService to get all IDs in
# hierarchy that do not belong to MCC accounts.
managed_customer_service = client.GetService('ManagedCustomerService',
version='v201809')
offset = 0
# Get the account hierarchy for this account.
selector = {
'fields': ['CustomerId'],
'predicates': [{
'field': 'CanManageClients',
'operator': 'EQUALS',
'values': [False]
}],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
# Using Queue to balance load between processes.
queue = multiprocessing.Queue()
more_pages = True
while more_pages:
page = managed_customer_service.get(selector)
if page and 'entries' in page and page['entries']:
for entry in page['entries']:
queue.put(entry['customerId'])
else:
raise Exception('Can\'t retrieve any customer ID.')
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
return queue
def main(client, report_download_directory):
# Determine list of customer IDs to retrieve report for.
input_queue = GetCustomerIDs(client)
reports_succeeded = multiprocessing.Queue()
reports_failed = multiprocessing.Queue()
# Create report definition.
report_definition = {
'reportName': 'Custom ADGROUP_PERFORMANCE_REPORT',
'dateRangeType': 'LAST_7_DAYS',
'reportType': 'ADGROUP_PERFORMANCE_REPORT',
'downloadFormat': 'CSV',
'selector': {
'fields': ['CampaignId', 'AdGroupId', 'Impressions', 'Clicks',
'Cost'],
# Predicates are optional.
'predicates': {
'field': 'AdGroupStatus',
'operator': 'IN',
'values': ['ENABLED', 'PAUSED']
}
},
}
queue_size = input_queue.qsize()
num_processes = min(queue_size, MAX_PROCESSES)
print 'Retrieving %d reports with %d processes:' % (queue_size, num_processes)
# Start all the processes.
processes = [ReportWorker(report_download_directory,
report_definition, input_queue, reports_succeeded,
reports_failed)
for _ in range(num_processes)]
for process in processes:
process.start()
for process in processes:
process.join()
print 'Finished downloading reports with the following results:'
while True:
try:
success = reports_succeeded.get(timeout=0.01)
except Empty:
break
print '\tReport for CustomerId "%d" succeeded.' % success['customerId']
while True:
try:
failure = reports_failed.get(timeout=0.01)
except Empty:
break
print ('\tReport for CustomerId "%d" failed with error code "%s" and '
'message: %s.' % (failure['customerId'], failure['code'],
failure['message']))
if __name__ == '__main__':
adwords_client = googleads.adwords.AdWordsClient.LoadFromStorage(
'googleads.yaml')
main(adwords_client, REPORT_DOWNLOAD_DIRECTORY)
How can I get the performance reports for multiple MCC accounts?
You need to create a separate googleads.adwords.AdWordsClient instance for each account, because one client can only work with one AdWords account (an MCC or a single account).
To create an AdWordsClient instance you can automate the flow without a YAML configuration file; the code below shows how to build the client (the rest of the code stays the same):
"""Initializes a AdManagerClient without using yaml-cached credentials.
While our LoadFromStorage method provides a useful shortcut to instantiate a
client if you regularly use just one set of credentials, production applications
may need to swap out users. This example shows you how to create an OAuth2
client and a AdManagerClient without relying on a yaml file.
"""
from googleads import ad_manager
from googleads import oauth2
# OAuth2 credential information. In a real application, you'd probably be
# pulling these values from a credential storage.
CLIENT_ID = 'INSERT_CLIENT_ID_HERE'
CLIENT_SECRET = 'INSERT_CLIENT_SECRET_HERE'
REFRESH_TOKEN = 'INSERT_REFRESH_TOKEN_HERE'
# Ad Manager API information.
APPLICATION_NAME = 'INSERT_APPLICATION_NAME_HERE'
# Client customer id
CLIENT_CUSTOMER_ID = 'INSERT_CLIENT_CUSTOMER_ID_HERE'
def main(client_id, client_secret, refresh_token, application_name):
oauth2_client = oauth2.GoogleRefreshTokenClient(
client_id, client_secret, refresh_token)
ad_manager_client = ad_manager.AdManagerClient(
oauth2_client, application_name,client_customer_id=CLIENT_CUSTOMER_ID)
networks = ad_manager_client.GetService('NetworkService').getAllNetworks()
for network in networks:
print ('Network with network code "%s" and display name "%s" was found.'
% (network['networkCode'], network['displayName']))
if __name__ == '__main__':
main(CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, APPLICATION_NAME)
Code reference
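For the AdWords case in the question, a minimal sketch along the same lines would build one AdWordsClient per manager account read from a file, instead of loading googleads.yaml. The file name mcc_accounts.csv, its one-ID-per-line layout, and the credential placeholders are assumptions for illustration; main() and REPORT_DOWNLOAD_DIRECTORY refer to the question's code:
import csv
import googleads.adwords
import googleads.oauth2
# Placeholder credentials; in practice pull these from secure storage.
CLIENT_ID = 'INSERT_CLIENT_ID_HERE'
CLIENT_SECRET = 'INSERT_CLIENT_SECRET_HERE'
REFRESH_TOKEN = 'INSERT_REFRESH_TOKEN_HERE'
DEVELOPER_TOKEN = 'INSERT_DEVELOPER_TOKEN_HERE'
USER_AGENT = 'INSERT_USER_AGENT_HERE'
def build_adwords_client(mcc_customer_id):
    """Builds an AdWordsClient for one manager account without a YAML file."""
    oauth2_client = googleads.oauth2.GoogleRefreshTokenClient(
        CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN)
    return googleads.adwords.AdWordsClient(
        DEVELOPER_TOKEN, oauth2_client, USER_AGENT,
        client_customer_id=mcc_customer_id)
if __name__ == '__main__':
    # Assumed layout: one MCC customer ID per line in mcc_accounts.csv.
    with open('mcc_accounts.csv') as csv_file:
        mcc_ids = [row[0] for row in csv.reader(csv_file) if row]
    for mcc_id in mcc_ids:
        adwords_client = build_adwords_client(mcc_id)
        # Reuse main() and REPORT_DOWNLOAD_DIRECTORY from the question's code.
        main(adwords_client, REPORT_DOWNLOAD_DIRECTORY)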
Related
I am trying to pull data from Bloomberg using the Python API. The API package comes with example code, and the programs that only require localhost work perfectly. However, the programs that use other authorization methods always fail with this error:
Connecting to port 8194 on localhost
TokenGenerationFailure = {
reason = {
source = "apitkns (apiauth) on ebbdbp-ob-053"
category = "NO_AUTH"
errorCode = 12
description = "User not in emrs userid=NA\mds firm=22691"
subcategory = "INVALID_USER"
}
}
Failed to get token
No authorization
I saw one other person with a similar problem, but instead of solving it they chose to just use localhost. I can't always use localhost because I will have to assist and troubleshoot for other users, so I need a hint on how to overcome this error.
My question is: how can I set the user ID to something other than OS_LOGON, which automatically uses the login credentials of my own account, so that I can use other users' names when needed? I tried replacing OS_LOGON with the user name, but it didn't work.
The full program I am trying to run is:
"""SnapshotRequestTemplateExample.py"""
from __future__ import print_function
from __future__ import absolute_import
import datetime
from optparse import OptionParser, OptionValueError
import blpapi
TOKEN_SUCCESS = blpapi.Name("TokenGenerationSuccess")
TOKEN_FAILURE = blpapi.Name("TokenGenerationFailure")
AUTHORIZATION_SUCCESS = blpapi.Name("AuthorizationSuccess")
TOKEN = blpapi.Name("token")
def authOptionCallback(_option, _opt, value, parser):
vals = value.split('=', 1)
if value == "user":
parser.values.auth = "AuthenticationType=OS_LOGON"
elif value == "none":
parser.values.auth = None
elif vals[0] == "app" and len(vals) == 2:
parser.values.auth = "AuthenticationMode=APPLICATION_ONLY;"\
"ApplicationAuthenticationType=APPNAME_AND_KEY;"\
"ApplicationName=" + vals[1]
elif vals[0] == "userapp" and len(vals) == 2:
parser.values.auth = "AuthenticationMode=USER_AND_APPLICATION;"\
"AuthenticationType=OS_LOGON;"\
"ApplicationAuthenticationType=APPNAME_AND_KEY;"\
"ApplicationName=" + vals[1]
elif vals[0] == "dir" and len(vals) == 2:
parser.values.auth = "AuthenticationType=DIRECTORY_SERVICE;"\
"DirSvcPropertyName=" + vals[1]
else:
raise OptionValueError("Invalid auth option '%s'" % value)
def parseCmdLine():
"""parse cli arguments"""
parser = OptionParser(description="Retrieve realtime data.")
parser.add_option("-a",
"--ip",
dest="hosts",
help="server name or IP (default: localhost)",
metavar="ipAddress",
action="append",
default=[])
parser.add_option("-p",
dest="port",
type="int",
help="server port (default: %default)",
metavar="tcpPort",
default=8194)
parser.add_option("--auth",
dest="auth",
help="authentication option: "
"user|none|app=<app>|userapp=<app>|dir=<property>"
" (default: %default)",
metavar="option",
action="callback",
callback=authOptionCallback,
type="string",
default="user")
(opts, _) = parser.parse_args()
if not opts.hosts:
opts.hosts = ["localhost"]
if not opts.topics:
opts.topics = ["/ticker/IBM US Equity"]
return opts
def authorize(authService, identity, session, cid):
"""authorize the session for identity via authService"""
tokenEventQueue = blpapi.EventQueue()
session.generateToken(eventQueue=tokenEventQueue)
# Process related response
ev = tokenEventQueue.nextEvent()
token = None
if ev.eventType() == blpapi.Event.TOKEN_STATUS or \
ev.eventType() == blpapi.Event.REQUEST_STATUS:
for msg in ev:
print(msg)
if msg.messageType() == TOKEN_SUCCESS:
token = msg.getElementAsString(TOKEN)
elif msg.messageType() == TOKEN_FAILURE:
break
if not token:
print("Failed to get token")
return False
# Create and fill the authorization request
authRequest = authService.createAuthorizationRequest()
authRequest.set(TOKEN, token)
# Send authorization request to "fill" the Identity
session.sendAuthorizationRequest(authRequest, identity, cid)
# Process related responses
startTime = datetime.datetime.today()
WAIT_TIME_SECONDS = 10
while True:
event = session.nextEvent(WAIT_TIME_SECONDS * 1000)
if event.eventType() == blpapi.Event.RESPONSE or \
event.eventType() == blpapi.Event.REQUEST_STATUS or \
event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
for msg in event:
print(msg)
if msg.messageType() == AUTHORIZATION_SUCCESS:
return True
print("Authorization failed")
return False
endTime = datetime.datetime.today()
if endTime - startTime > datetime.timedelta(seconds=WAIT_TIME_SECONDS):
return False
def main():
"""main entry point"""
global options
options = parseCmdLine()
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
for idx, host in enumerate(options.hosts):
sessionOptions.setServerAddress(host, options.port, idx)
sessionOptions.setAuthenticationOptions(options.auth)
sessionOptions.setAutoRestartOnDisconnection(True)
print("Connecting to port %d on %s" % (
options.port, ", ".join(options.hosts)))
session = blpapi.Session(sessionOptions)
if not session.start():
print("Failed to start session.")
return
subscriptionIdentity = None
if options.auth:
subscriptionIdentity = session.createIdentity()
isAuthorized = False
authServiceName = "//blp/apiauth"
if session.openService(authServiceName):
authService = session.getService(authServiceName)
isAuthorized = authorize(authService, subscriptionIdentity,
session, blpapi.CorrelationId("auth"))
if not isAuthorized:
print("No authorization")
return
else:
print("Not using authorization")
.
.
.
.
.
finally:
session.stop()
if __name__ == "__main__":
print("SnapshotRequestTemplateExample")
try:
main()
except KeyboardInterrupt:
print("Ctrl+C pressed. Stopping...")
This example is intended for Bloomberg's BPIPE product and as such includes the necessary authorization code. For this example, if you're connecting to the Desktop API (typically localhost:8194) you would want to pass an auth parameter of "none". Note that this example is for the mktdata snapshot functionality which isn't supported by Desktop API.
You state you're trying to troubleshoot on behalf of other users, presumably traders using BPIPE under their credentials. In this case you would need to create an Identity object to represent that user.
This would be done thusly:
# Create and fill the authorization request
authRequest = authService.createAuthorizationRequest()
authRequest.set("authId", STRING_CONTAINING_USERS_EMRS_LOGON)
authRequest.set("ipAddress", STRING_OF_IP_ADDRESS_WHERE_USER_IS_LOGGED_INTO_TERMINAL)
# Send authorization request to "fill" the Identity
session.sendAuthorizationRequest(authRequest, identity, cid)
Please be aware of potential licensing compliance issues when using this approach as this can have serious consequences. If in any doubt, approach your firm's market data team who will be able to ask their Bloomberg contacts.
Edit:
As asked in the comments, it's useful to elaborate on the other possible parameters for the AuthorizationRequest.
"uuid" + "ipAddress"; this would be the default method of authenticating users for Server API. On BPIPE this would require Bloomberg to explicitly enable it for you. The UUID is the unique integer identifier assigned to each Bloomberg Anywhere user. You can look this up in the terminal by running IAM
"emrsId" + "ipAddress"; "emrsId" is a deprecated alias for "authId". This shouldn't be used anymore.
"authId" + "ipAddress"; "authId" is the String defined in EMRS (the BPIPE Entitlements Management and Reporting System) or SAPE (the Server API's equivalent of EMRS) that represents each user. This would typically be that user's OS login details (e.g. DOMAIN/USERID) or Active Directory property (e.g. mail -> blah#blah.blah)
"authId" + "ipAddress" + "application"; "application" is the application name defined on EMRS/SAPE. This will check to see whether the user defined in authId is enabled for the named application on EMRS. Using one of these user+app style Identity objects in requests should record usage against both the user and application in the EMRS usage reports.
"token"; this is the preferred approach. Using the session.generateToken functionality (which can be seen in the original question's code snippet) will result in an alphanumeric string. You'd pass this as the only parameter into the Authorization request. Note that the token generation system is virtualization-aware; if it detects it's running in Citrix or a remote desktop it will report the IP address of the display machine (or one hop towards where the user actually is).
getting "Invalid requests[0].createImage: Access to the provided image was forbidden."
on adding image from google drive to google slides
Here below the code snippet I've tried
although making the image public through advanced sharing option.
the service account has edit access to the slide I want to add image to it.
# import section: add the required dependencies here
# python built-in modules
import json
import time
import io
# external modules
from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
from apiclient.http import MediaFileUpload
# user modules
from via_types import BasePlugin
class GoogleSlidesPlugin(BasePlugin):
"""
this plugin allows to communicate with Google API and Google Spreadsheets
by using the already created credentials
"""
def __init__(self):
BasePlugin.__init__(self)
self.Scopes = ['https://www.googleapis.com/auth/drive','https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive.readonly','https://www.googleapis.com/auth/presentations','https://www.googleapis.com/auth/presentations.readonly']
self.Creds = ServiceAccountCredentials.from_json_keyfile_name(
r'Libraries/credentials/lucky-dahlia-268410-c4c0c57c1908.json', self.Scopes)
self.service = build('slides', 'v1', credentials=self.Creds)
self.drive = build('drive', 'v3', credentials=self.Creds)
def add_images_to_gslides(self, args):
"""
Adds images to a slide in a Google Slides presentation.
:param args: [0]: presentation ID
[1]: array of image paths
[2]: slide number to insert the images into
:param uid: this value is automatically generated for grouped steps
:return: the detail and execution status of the plugin
"""
# start results to report to output
self.response.status = 'pass'
self.response.details = 'image/s was/were added to slides'
try:
# try to store the passed arguments
presentation_id = args[0]
except Exception as e:
# if there is a missed argument the command is going to end its exec
# store the results to report in output
self.response.status = 'fail'
self.response.details = 'some argument was missed'
return self.response
try:
# open a connection to a specific Google Slides presentation
presentation = self.service.presentations().get(presentationId=presentation_id).execute()
slides = presentation.get('slides')
print('The presentation contains {} slides:'.format(len(slides)))
for i, slide in enumerate(slides):
print('- Slide #{} contains {} elements.'.format(
i + 1, len(slide.get('pageElements'))))
IMG_FILE = args[1]
try :
print('** Searching for icon file')
rsp = self.drive.files().list(q="name='%s'" % IMG_FILE).execute().get('files')[0]
print(' - Found image %r' % rsp['name'])
IMAGE_URL = 'https://drive.google.com/uc?export=download&id=' + rsp['id']
#IMAGE_URL = 'https://drive.google.com/file/d/'+rsp['id']+ '/edit'
except Exception as e:
file_metadata = {'name': IMG_FILE}
media = MediaFileUpload(IMG_FILE, mimetype='image/png')
upload = self.drive.files().create(body=file_metadata, media_body=media, fields='webContentLink, id, webViewLink').execute()
fileId = upload.get('id')
IMAGE_URL = upload.get('webContentLink')
self.drive.permissions().create(fileId=fileId, body={'type': 'anyone','role': 'writer'}).execute()
print(IMAGE_URL)
requests = []
image_id = 'MyImage_02'
page_id = slide['objectId']
emu4M = {
'magnitude': 4000000,
'unit': 'EMU'
}
requests.append({
'createImage': {
'objectId': image_id,
'url': IMAGE_URL,
'elementProperties': {
'pageObjectId': page_id,
'size': {
'height': emu4M,
'width': emu4M
},
'transform': {
'scaleX': 1,
'scaleY': 1,
'translateX': 100000,
'translateY': 100000,
'unit': 'EMU'
}
}
}
})
body = {
'requests': requests
}
response = self.service.presentations() \
.batchUpdate(presentationId=presentation_id, body=body ).execute()
create_image_response = response.get('replies')[0].get('createImage')
print('Created image with ID: {0}'.format(
create_image_response.get('objectId')))
except Exception as e:
self.response.status = 'fail'
self.response.details = str(e)
return self.response
if __name__ == '__main__':
# use_the_main_function_to_test_your_plugin_without_using_a_sequence
plugin_template = GoogleSlidesPlugin()
plugin_template.add_images_to_gslides([presentationID,'image.png'])
print(plugin_template.response.status,
plugin_template.response.details)
I also wanted to find a way to upload images to Google Slides using Python. I created a program that uploads images to Google Cloud Storage (instead of Google Drive); creates a signed URL for them; then uses that URL to import images into Google Slides. It's available here under the MIT License and may be a helpful tool/resource.
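As a rough illustration of that approach, here is a minimal sketch assuming the google-cloud-storage client library, a bucket named my-slides-assets, and service-account credentials with a private key; the resulting URL would be used as the 'url' field of the createImage request shown in the question:
import datetime
from google.cloud import storage
# Assumed bucket and object names for illustration.
BUCKET_NAME = 'my-slides-assets'
IMAGE_FILE = 'image.png'
# Upload the local image to Cloud Storage.
storage_client = storage.Client()
bucket = storage_client.bucket(BUCKET_NAME)
blob = bucket.blob(IMAGE_FILE)
blob.upload_from_filename(IMAGE_FILE)
# Create a short-lived signed URL that the Slides API can fetch.
image_url = blob.generate_signed_url(
    version='v4', expiration=datetime.timedelta(minutes=15), method='GET')
print(image_url)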
I have created a Cloud Compute Engine instance on Debian, and have successfully created a PUSH subscription to a topic with
from google.cloud import pubsub_v1
project_id = "censored"
topic_name = "censored"
subscription_name = "censored"
endpoint = "https://censored.appspot.com/pubsub/push?token=censored"
def create_push_subscription(project_id,
topic_name,
subscription_name,
endpoint):
"""Create a new push subscription on the given topic."""
# [START pubsub_create_push_subscription]
subscriber = pubsub_v1.SubscriberClient()
topic_path = subscriber.topic_path(project_id, topic_name)
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
push_config = pubsub_v1.types.PushConfig(
push_endpoint=endpoint)
subscription = subscriber.create_subscription(
subscription_path, topic_path, push_config)
print('Push subscription created: {}'.format(subscription))
print('Endpoint for subscription is: {}'.format(endpoint))
# [END pubsub_create_push_subscription]
create_push_subscription(project_id, topic_name, subscription_name, endpoint)
but I'm not sure how exactly incoming messages arrive. I have found this sample code to parse messages, but I'm not sure how to get it to monitor in the background and 'activate' whenever incoming messages arrive.
import argparse
import base64
import json
import sys
import time
from google.cloud import pubsub_v1
def summarize(message):
# [START parse_message]
data = message.data.decode('utf-8')
attributes = message.attributes
name = attributes['name']
time_created = attributes['timeCreated']
bucket_id = attributes['bucketId']
object_id = attributes['objectId']
generation = attributes['objectGeneration']
description = (
'\tName: {name}\n'
'\tTime Created: {time_created}\n'
'\tBucket ID: {bucket_id}\n'
'\tObject ID: {object_id}\n'
'\tGeneration: {generation}\n'
).format(
name=name,
time_created=time_created,
bucket_id=bucket_id,
object_id=object_id,
generation=generation
)
if 'overwroteGeneration' in attributes:
description += '\tOverwrote generation: %s\n' % (
attributes['overwroteGeneration'])
if 'overwrittenByGeneration' in attributes:
description += '\tOverwritten by generation: %s\n' % (
attributes['overwrittenByGeneration'])
payload_format = attributes['payloadFormat']
if payload_format == 'JSON_API_V1':
object_metadata = json.loads(data)
name = object_metadata['name']
time_created = object_metadata['timeCreated']
size = object_metadata['size']
content_type = object_metadata['contentType']
metageneration = object_metadata['metageneration']
description += (
'\tName: {name}\n'
'\tTime Created: {time_created}\n'
'\tContent type: {content_type}\n'
'\tSize: {object_size}\n'
'\tMetageneration: {metageneration}\n'
).format(
name=name,
time_created=time_created,
content_type=content_type,
object_size=size,
metageneration=metageneration
)
return description
print('Note for developer: If BucketId and ObjectId listed, utf encoding.')
print('If not, JSON_V1 encoding. Adjust accordingly.')
# [END parse_message]
while(True):
print("signpost 1")
summarize(message)
print("signpost 2")
time.sleep(10)
print("signpost 3")
For example, this code will return
NameError: name 'message' is not defined
which is expected...
Could someone please help me set it up properly?
I know it's different in PULL because then the message will be defined during the pull, but I'd like to keep it as PUSH, if possible.
You need to create a long-running process which is either able to continuously poll for new messages (pull subscription) or have a reachable endpoint to receive new messages (push subscription).
See the example here: https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/pubsub/cloud-client/subscriber.py, as well as the differences between push and pull here: https://cloud.google.com/pubsub/docs/subscriber
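With a push subscription, messages arrive as HTTP POST requests to the endpoint you registered, so nothing needs to poll in the background; your web application just handles that route. Below is a minimal sketch assuming Flask and the /pubsub/push path from the question (the token query-parameter check is omitted):
import base64
import json
from flask import Flask, request
app = Flask(__name__)
@app.route('/pubsub/push', methods=['POST'])
def pubsub_push():
    # Pub/Sub wraps each pushed message in a JSON envelope with base64-encoded data.
    envelope = json.loads(request.data.decode('utf-8'))
    message = envelope['message']
    data = base64.b64decode(message.get('data', '')).decode('utf-8')
    attributes = message.get('attributes', {})
    print('Received message: {} attributes: {}'.format(data, attributes))
    # Any 2xx response acknowledges the message; other status codes trigger a retry.
    return '', 204
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)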
Traceback (most recent call last):
File "/Users/jondevereux/Desktop/Data reporting/kpex_code/1PD/api_python_publisher_1PD.py", line 40, in <module>
username = parser.get('api_samples', 'username')
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py", line 607, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'api_samples'
The config file is in the correct directory (same as the .py file) and has the appropriate section api_samples:
[api_samples]
authentication_url = https://crowdcontrol.lotame.com/auth/v1/tickets
api_url = https://api.lotame.com/2/
username = xxx
password = xxx
The script works on a co-worker's PC but not on mine. I had to use pip to install requests - I'm wondering if I'm missing something else?
Code is as follows:
# Set up the libs we need
import requests
import sys
import csv
import json
from ConfigParser import SafeConfigParser # used to get information from a config file
reload(sys)
sys.setdefaultencoding('utf-8')
'''
Now let's get what we need from our config file, including the username and password
We are assuming we have a config file called config.config in the same directory
where this python script is run, where the config file looks like:
[api_samples]
authentication_url = https://crowdcontrol.lotame.com/auth/v1/tickets
api_url = https://api.lotame.com/2/
username = USERNAME_FOR_API_CALLS
password = PASSWORD
'''
# Set up our Parser and get the values - usernames and password should never be in code!
parser = SafeConfigParser()
parser.read('config.cfg')
username = parser.get('api_samples', 'username')
password = parser.get('api_samples', 'password')
authentication_url = parser.get('api_samples', 'authentication_url')
base_api_url = parser.get('api_samples', 'api_url')
# OK, all set with our parameters, let's get ready to make our call to get a Ticket Granting Ticket
# Add the username and password to the payload (requests encodes for us, no need to urlencode)
payload = {'username': username,
'password': password}
# We want to set some headers since we are going to post some url encoded params.
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "User-Agent":"python" }
# Now, let's make our Ticket Granting Ticket request. We get the location from the response header
tg_ticket_location = requests.post(authentication_url, data=payload).headers['location']
# Let's take a look at what a Ticket Granting Ticket looks like:
# print ('Ticket Granting Ticket - %s \n') % (tg_ticket_location[tg_ticket_location.rfind('/') + 1:])
# Now we have our Ticket Granting Ticket, we can get a service ticket for the service we want to call
# The first service call will be to get information on behavior id 5990.
# service_call = base_api_url + 'behaviors/5990'
# Add the service call to the payload and get the ticket
#payload = {'service': service_call}
#service_ticket = requests.post( tg_ticket_location, data=payload ).text
# Let's take a look at the service ticket
#print ('Here is our Service Ticket - %s \n') % ( service_ticket )
'''
Now let's make our call to the service ... remember we need to be quick about it because
we only have 10 seconds to do it before the Service Ticket expires.
A couple of things to note:
JSON is the default response, and it is what we want, so we don't need to specify
like {'Accept':'application/json'}, but we will anyway because it is a good practice.
We don't need to pass any parameters to this call, so we just add the parameter
notation and then 'ticket=[The Service Ticket]'
'''
headers = {'Accept':'application/json'}
#behavior_info = requests.get( ('%s?ticket=%s') % (service_call, service_ticket), headers=headers)
# Let's print out our JSON to see what it looks like
# requests supports JSON on its own, so no other package is needed for this
# print ('Behavior Information: \n %s \n') % (behavior_info.json() )
'''
Now let's get the names and IDs of some audiences
We can reuse our Ticket Granting Ticket for a 3 hour period ( we haven't passed that yet),
so let's use it to get a service ticket for the audiences service call.
Note that here we do have a parameter that is part of the call. That needs to be included
in the Service Ticket request.
We plan to make a call to the audience service to get the first 10 audiences in the system
ascending by audience id. We don't need to pass the sort order, because it defaults to ascending
'''
# Set up our call and get our new Service Ticket, we plan to sort by id
# Please insert audiences ID below:
audienceids = ['243733','243736','241134','242480','240678','242473','242483','241119','243732','242492','243784','242497','242485','243785','242486','242487','245166','245167','245168','245169','245170','245171','240860']
f = open("publisher_report_1PD.csv", 'w+')
title_str = ['1PD % Contribution','audienceId','publisherName','audienceName']
print >> f,(title_str)
for audience_id in audienceids:
service_call = base_api_url + 'reports/audiences/' + audience_id + '/publisher?stat_interval=LAST_MONTH&page_count=100&page_num=1&sort_attr=audienceName&inc_network=false&sort_order=ASC'
payload = {'service': service_call}
# Let's get the new Service Ticket, we can print it again to see it is a new ticket
service_ticket = requests.post( tg_ticket_location, data=payload ).text
#print ('Here is our new Service Ticket - %s \n') % ( service_ticket )
# Use the new ticket to query the service, remember we did have a parameter this time,
# so we need to & 'ticket=[The Service Ticket]' to the parameter list
audience_list = requests.get( ('%s&ticket=%s') % (service_call, service_ticket)).json()
#print audience_list
# create an array to hold the audiences, pull out the details we want, and print it out
audiences = []
for ln in audience_list['stats']:
audiences.append({ 'audienceId': ln['audienceId'], 'audienceName': ln['audienceName'], 'publisherName': ln['publisherName'], '1PD % Contribution': ln['percentOfAudience']})
for ii in range( 0, len(audiences) ):
data = audiences[ii]
data_str = json.dumps(data)
result = data_str.replace("\"","")
result1 = result.replace("{1PD % Contribution:","")
result2 = result1.replace("publisherName: ","")
result3 = result2.replace("audienceName: ","")
result4 = result3.replace("audienceId: ","")
result5 = result4.replace("}","")
print >> f,(result5)
# Once we are done with the Ticket Granting Ticket we should clean it up
remove_tgt = requests.delete( tg_ticket_location )
print ( 'Status for closing TGT - %s') % (remove_tgt.status_code)
i = input('YAY! Gotcha!!')
I see only one likely reason for your problem: you run the script from a different folder, so the script looks for config.cfg in a different folder.
You can get the full path to the folder containing the script:
import os
script_folder = os.path.dirname(os.path.realpath(__file__))
and build the full path to config.cfg:
parser.read( os.path.join(script_folder, 'config.cfg') )
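Put together, a minimal sketch of the fix (keeping the Python 2 SafeConfigParser used in the question) would be:
import os
from ConfigParser import SafeConfigParser
# Resolve config.cfg relative to this script, not the current working directory.
script_folder = os.path.dirname(os.path.realpath(__file__))
parser = SafeConfigParser()
parser.read(os.path.join(script_folder, 'config.cfg'))
username = parser.get('api_samples', 'username')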
I am trying to use threading and store the result from the threads using a single session. This works fine most of the time, except in a few scenarios where my whole application gets blocked, and I am not able to figure out the reason for that.
My application is getting blocked on notification.save() in the __filter_notifications_by_status_and_request_type method. notification.save() saves the data into the DB.
I am not able to figure out whether this is a DB issue or a threading/locking issue.
I am using a Flask app, which I am hitting through passenger_wsgi via Apache. After my application gets blocked, my server stops taking further requests.
DB Python library used: SQLAlchemy
class Inference:
##
# #brief initializer of the Inference Handler
#
# #param kwargs keywords Arguments
#
# #return None
def __init__(self, **kwargs):
""" Calling the inference from here and get the result """
if((kwargs.has_key('IRISRequest'))):
self.iris_request = kwargs['IRISRequest']
self.racerx_inference_config = Config.get_racerx_inference_config()
self.thread_lock = threading.Lock()
##
# #brief Call the Infernce
#
# #return Inference Object
def get_inference_object(self):
log.info("get_inference_object is called")
inference_map = {}
inference_map['inference'] = {}
if self.iris_request.system == "athena":
url_to_notification_map = Config.get_url_to_notification_map()
for notification_id, urls in url_to_notification_map.iteritems():
inference_map['inference'][notification_id] = any(url in string.lower(self.iris_request.url) for url in urls)
title_to_notification_map = Config.get_title_to_notification_map()
if self.iris_request.context.has_key('title') :
for notification_id, titles in title_to_notification_map.iteritems():
if not inference_map['inference'].has_key(notification_id) or inference_map['inference'][notification_id] == False:
inference_map['inference'][notification_id] = any(title in string.lower(self.iris_request.context['title']) for title in titles)
return inference_map
##
# #brief
#
# #return the list of the notification required from the reference
def get_notification_name_list(self):
inference_object = self.get_inference_object()
return [y for y in inference_object['inference'] if inference_object['inference'][y] == True]
##
# #brief collect notifications from the various sources
#
# #return notification objects
def get_notifications(self):
if(len(self.iris_request.notification_name_list) > 0):
self.notification_name_list = self.iris_request.notification_name_list # List of Notifciation List is provided by the client
else:
self.notification_name_list = self.get_notification_name_list() # Get Notification Name List from the Inference
string_translations = {}
for notification_name in self.notification_name_list:
config = Config.get_config(notification_name)
nt = {}
nt['message'] = self.__get_message_from_template(config.message_map)
nt['subject'] = self.__get_message_from_template(config.subject_map)
nt['short_message'] = self.__get_message_from_template(config.short_message_map)
nt['impact_summary'] = self.__get_message_from_template(config.impact_summary_map)
action_string_map = {}
for h in config.action_summary_map:
if h.has_key('string_id'):
action_string_map[h['string_id']] = self.__get_message_from_template(h)
nt['action_summary_list'] = action_string_map
help_strings_map = {}
for h in config.help_content:
if h.has_key('string_id'):
help_strings_map[h['string_id']] = self.__get_message_from_template(h)
nt['help_content_strings'] = help_strings_map
string_translations[notification_name] = nt
notifications_map = {}
log.info("starting the thread pool for getting the notifications data")
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_notification_name = dict((executor.submit(self.fetch_notifications_by_notification_name, notification_name, string_translations), notification_name)
for notification_name in self.notification_name_list)
log.info("end of threadpool")
log.info("start processing the data produced by the thread pool")
for future in concurrent.futures.as_completed(future_to_notification_name):
notification_name = future_to_notification_name[future]
if future.exception() is not None:
raise Exception("Error occured while fetching the data for notification: "+notification_name+", error: "+str(future.exception()))
if len(future.result()) > 0:
notifications_map[notification_name] = future.result()
log.info("end processing the data produced by the thread pool")
self.iris_request.session.commit()
log.info("Commited the DB session for the notifications")
return notifications_map
###
# #brief This function collect the notifications for the specified notification type, by making object model call
#
# #input notification_name : Type of the notification to be fetched
# #input string_translations : List of string translations
# #input notification_map : Map of notifications, collected notifications will be pushed to this map
def fetch_notifications_by_notification_name (self, notification_name, string_translations):
log.info("fetch_notifications_by_notification_name is called")
object_model = ObjectModel(IRISRequest = self.iris_request, NotificationName = notification_name, StringMap = string_translations[notification_name])
notifications = object_model.get_iris_notification_objects()
filtered_notifications = self.__filter_notifications_by_status_and_request_type(notifications)
if len(filtered_notifications) > 0:
return filtered_notifications
else:
return []
###
# #brief This function filter the notification based on status, i.e. of notification is expired, snoozed or dismissed
# and also based on request type
#
# #input notifications: List of notifications
#
# #return Filtered notification list
def __filter_notifications_by_status_and_request_type(self, notifications):
log.info("__filter_notifications_by_status_and_request_type is called")
filtered_notifications = []
for notification in notifications:
keep_notification = True
# Extracting read status of notifications and storing new notifications
log.info("Acquiring the lock on thread, to save the data into DB")
self.thread_lock.acquire()
notification.save()
self.thread_lock.release()
log.info("Releasing the lock after saving the data into DB")
# Filtering inactive notifications, i.e dismissed notifications
if notification.is_active == False:
keep_notification = False
# Filtering expired notifications, if validity = -1 then notification will never expire
if notification.validity != -1 and (datetime.date.today() - notification.creation_date).days > notification.validity:
keep_notification = False
# Filtering out the snoozed notifications
if notification.snooze_date != None and (datetime.datetime.today() - notification.snooze_date).days <= notification.snooze_duration:
keep_notification = False
# Filtering out unread notification when request type is FETCH_READ
if self.iris_request.notifcation_fetch_type == Constants.FETCH_TYPE_READ and notification.is_read == False:
keep_notification = False
# Filtering out read notification when request type is FETCH_UNREAD
if self.iris_request.notifcation_fetch_type == Constants.FETCH_TYPE_UNREAD and notification.is_read == True:
keep_notification = False
if keep_notification == True:
filtered_notifications.append(notification)
return filtered_notifications
I was using the lock in the following manner:
self.thread_lock.acquire()
notification.save()
self.thread_lock.release()
When notification.save() throws an exception, the lock is never released.
This can be easily fixed with proper error handling:
self.thread_lock.acquire()
try:
notification.save()
except Exception as e:
log.error("unable to store info in DB")
finally:
self.thread_lock.release()
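An equivalent, slightly more idiomatic sketch uses the lock as a context manager, which releases it automatically even if notification.save() raises:
with self.thread_lock:
    try:
        notification.save()
    except Exception:
        log.error("unable to store info in DB")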