Google Classroom Python API: use a batch request and get the response back - python

I am new to the Google Classroom API; I just started a few days ago. I want to use batch requests to make the code faster. My problem is that I don't know how to get the data back from the batch. I tried printing it in the console, but it returned None.
def get_all_courses(service):
    nextpageToken = ""
    list_id = []
    while nextpageToken is not None:
        result = service.courses().list(
            pageSize=500,
            pageToken=nextpageToken,
            fields="nextPageToken,courses(id)"
        )
        result = result.execute()
        lista_curs = result.get("courses")
        for curs in lista_curs:
            list_id.append(curs.get('id'))
        nextpageToken = result.get("nextPageToken")
    print("There are: " + str(len(list_id)))
    return list_id
This is the normal, non-batched code. How can I pass the request to a batch and get the results back?
batch1 = service.new_batch_http_request()
result = service.courses().list(
    pageSize=500,
    pageToken=nextpageToken,
    fields="nextPageToken,courses(id)"
)
batch1.add(result)
batch1.execute()  # how do I get the result back? do I have to call result.execute() again?
This is the code from the docs: https://developers.google.com/classroom/guides/batch
course_id = '123456'
student_emails = ['alice@example.edu', 'bob@example.edu']

def callback(request_id, response, exception):
    if exception is not None:
        print('Error adding user "{0}" to the course: {1}'.format(
            request_id, exception))
    else:
        print('User "{0}" added as a student to the course.'.format(
            response.get('profile').get('name').get('fullName')))

batch = service.new_batch_http_request(callback=callback)
for student_email in student_emails:
    student = {
        'userId': student_email
    }
    request = service.courses().students().create(courseId=course_id,
                                                  body=student)
    batch.add(request, request_id=student_email)
batch.execute(http=http)  # what is http? what can I pass there? what kind of object is that?
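For what it's worth, with google-api-python-client the results come back through the callback you pass to new_batch_http_request, not from the request objects, so there is no second execute() on the request itself. The http argument of batch.execute() can usually be omitted; as far as I can tell the library then falls back to the authorized http object of the requests you added. A minimal sketch adapting the courses listing above (the callback and list names are illustrative, not from the docs):

course_ids = []

def list_callback(request_id, response, exception):
    # Runs once per batched request when batch.execute() is called.
    if exception is not None:
        print('Request {0} failed: {1}'.format(request_id, exception))
        return
    for course in response.get('courses', []):
        course_ids.append(course['id'])

batch = service.new_batch_http_request(callback=list_callback)
batch.add(
    service.courses().list(pageSize=500, fields='nextPageToken,courses(id)'),
    request_id='courses-page-1')
batch.execute()  # results arrive in list_callback; no http argument needed here
print(course_ids)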
Thank you in advance

Did you get a response from this? How have you handled the next page tokens and added them to the batch call?
def makeRequestWithExponentialBackoff(analytics):
    for n in range(0, 5):
        try:
            response = service_two.courses().courseWork().studentSubmissions().list(
                pageToken=None, courseId=v['id'],
                courseWorkId='-', pageSize=100000).execute()
            print(response.get("studentSubmissions", []))
            return assignments.extend(response.get("studentSubmissions", []))
        except:
            time.sleep((2 ** n) + random.random())
            continue
    print("There has been an error, the request never succeeded.")

def callback(request_id, response, exception):
    if exception is not None:
        print('Error getting assignments "{0}" for course: "{1}"'.format(request_id, exception))
        makeRequestWithExponentialBackoff(request_id)
    else:
        assignments.extend(response.get("studentSubmissions", []))
        nextPageToken = response.get("nextPageToken", None)
        if nextPageToken:
            iftoken(nextPageToken, request_id)  # iftoken(): helper defined elsewhere

for k, v in filtered.iterrows():
    if count % 1000 == 0:
        submit = batch.execute(http=http)
        batch_count += 1
        print(batch_count)
        time.sleep(30)
        batch_array.append({'batchSent {}'.format(v['id'])})
        batch = service_two.new_batch_http_request(callback=callback)
        request = service_two.courses().courseWork().studentSubmissions().list(
            pageToken=None, courseId=v['id'], courseWorkId='-', pageSize=100000)
        batch.add(request, request_id=v['id'])
        array.append({'email': v['primaryEmail'], 'count': count, 'classid': v['id']})
        count = 1
    elif count % 1000 != 0 and batch is None:
        batch = service_two.new_batch_http_request(callback=callback)
        request = service_two.courses().courseWork().studentSubmissions().list(
            pageToken=None, courseId=v['id'], courseWorkId='-', pageSize=100000)
        batch.add(request, request_id=v['id'])
        array.append({'email': v['primaryEmail'], 'count': count, 'classid': v['id']})
        count += 1
    else:
        request = service_two.courses().courseWork().studentSubmissions().list(
            pageToken=None, courseId=v['id'], courseWorkId='-', pageSize=100000)
        batch.add(request, request_id=v['id'])
        array.append({'email': v['primaryEmail'], 'count': count, 'classid': v['id']})
        count += 1
where filtered is a dataframe of classroom IDs. However, I've got an issue with the nextPageTokens and how to feed them back in for the follow-up pages.
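One pattern that seems to work (a sketch, not tested against your dataframe; pending is an illustrative name): you cannot follow a nextPageToken inside a single batch, because the token only exists after the response arrives, so collect the tokens in the callback and queue the follow-up pages into the next batch:

pending = []  # (course_id, next_page_token) pairs awaiting a follow-up request

def callback(request_id, response, exception):
    if exception is not None:
        print('Error getting assignments "{0}" for course: "{1}"'.format(request_id, exception))
        return
    assignments.extend(response.get("studentSubmissions", []))
    token = response.get("nextPageToken")
    if token:
        pending.append((request_id, token))

# ... fill and execute the first batch as above, then drain the collected tokens:
while pending:
    batch = service_two.new_batch_http_request(callback=callback)
    for course_id, token in pending[:50]:
        batch.add(service_two.courses().courseWork().studentSubmissions().list(
            pageToken=token, courseId=course_id, courseWorkId='-', pageSize=100),
            request_id=course_id)
    del pending[:50]
    batch.execute()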

Related

How to pass multiple API calls?

I have a simple validation API call like this:
client = Client(
    token='{{YOUR_TOKEN_HERE}}',
    key='{{YOUR_KEY}}',
    environment='prod'
)
lookup_api = client.validations
result = lookup_api.list(number="{{NUMBER}}")
if result['status'] == 200:
    print(result['data'])
else:
    print("An error occurred. " + str(result['status']))
    print(result['data'])
I want to pass multiple different tokens and multiple numbers; how should I do it?
I tried one token with multiple numbers and it worked, but I have been stuck for hours on passing multiple tokens with multiple numbers.
Here was my attempt:
tokens = ['112233', '223344']
key = '10000-000'
environment = 'prod'
clients = [Client(tokens=token, key=key, environment=environment) for token in tokens]
lookup_api = [list(clients=x).validations for x in clients]
results = [lookup_api.list(number=x) for x in numbers]
for result in results:
    if result['status'] == 200:
        print(result['data'])
    else:
        print("An error occurred. " + str(result['status']))
        print(result['data'])
Any suggestion or help would be greatly appreciated!
list(clients=x) isn't proper syntax.
If you want to call the list function of client.validations like you had before, you'd want this:
results = [c.validations.list(number=n) for n in numbers for c in clients]
Otherwise, use a regular loop
for c in clients:
    for n in numbers:
        result = c.validations.list(number=n)
        status = result['status']
        data = result['data']
        if status != 200:
            print("An error occurred. " + str(status))
        print(data)
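If the intent is to pair each token with exactly one number rather than take the full cross product (an assumption on my part), zip the two lists instead:

for client, number in zip(clients, numbers):
    result = client.validations.list(number=number)
    if result['status'] != 200:
        print("An error occurred. " + str(result['status']))
    print(result['data'])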

How to reconnect after a ServerDisconnectedError or ServerTimeoutError in "await asyncio.gather(*tasks)" when using aiohttp.ClientSession()?

So, I'm trying to repeat a failed loop caused by errors such as ServerDisconnectedError and ServerTimeoutError, but I'm not managing it. Basically, what I'm doing is sending product codes to an endpoint that returns some information about each one. I'm using asyncio.ensure_future to create the tasks and await asyncio.gather(*tasks) to run them (I'm not really familiar with the asyncio and aiohttp functions, so I might be wrong in this explanation).
The ETL runs fine; the problem is that the bigger the chunk_size (the number of product codes sent to the endpoint), the higher the probability of getting a ServerDisconnectedError or ServerTimeoutError during the loop. I can catch both exceptions; the problem is that once I repeat the loop with "continue", the repeated iteration doesn't seem to work properly (it takes only a moment to send the same chunk of codes that took a long time the first time around), and it keeps showing me the same exceptions I got on the first round. I don't know whether I need to reconnect once the server disconnects, or something else. I need to repeat the failed loop and send the same chunk of product codes to the endpoint again, as on the first attempt, to fetch the information. Could anybody help with that? I hope I've been clear in explaining the problem.
So, this is the code:
async def get_all_balance(session, url, headers, product_code, company_code, balance_code):
    async with session.post(url,
                            data=json.dumps(
                                {'products': [product_code], 'companies': company_code,
                                 'cdBalance': balance_code, 'inStock': 1, 'inSalesOrder': 1}),
                            headers=headers) as resp:
        if resp.status == 200:
            data = await resp.read()
            product_balance = json.loads(data.decode("utf-8"))
            for data in product_balance['saldos']:
                lst_balance.append({
                    "cd_SKU": data.get('cdSKU'),
                    "cd_product": data.get('cdProduct'),
                    "cd_reference": data.get('cdReference'),
                    "cd_color": data.get('cdColor'),
                    "ds_size": data.get('dsSize'),
                    "cd_company": data.get('cdCompany'),
                    "nr_CNPJ_company": data.get('nrCNPJCompany'),
                    "qt_stock": data.get('qtStock'),
                    "qt_sales_order": data.get('qtSalesOrder'),
                    "cd_balance": balance_code
                })
        else:
            print("Resp.Status: {}".format(resp.status))

async def main():
    token = get_token()  # Get a token to access the endpoint
    headers = {
        'Authorization': 'Bearer ' + token,
        'Content-Type': 'application/json'
    }
    async with aiohttp.ClientSession() as session:
        lst_dict_cd_product = get_product_balance()  # A list of dicts containing the product codes, such as [{"cdProduto": "13889"}, {"cdProduto": "059788"}, ...]
        print("Qt products: " + str(len(lst_dict_cd_product)))
        balance_tasks = []
        lst_companies = [{"cdCompany": 1}, {"cdCompany": 2}, ...]
        lst_balance_code = [1, 11, 14]  # Balance codes to be sent to the endpoint as part of the payload
        chunk_size = 30000
        count = 0
        num_chunks = len(lst_dict_cd_product) / chunk_size
        mod_chunks = len(lst_dict_cd_product) % chunk_size
        if mod_chunks != 0:
            print(int(num_chunks) + 1)
        else:
            print(int(num_chunks))
        for balance_code in lst_balance_code:
            if balance_code == 1:
                company_code = lst_companies[0:]
                print(company_code)
            else:
                company_code = lst_companies[0:1]
                print(company_code)
            for start in range(0, len(lst_dict_cd_product), chunk_size):
                end = start + chunk_size
                lst_product_code = lst_dict_cd_product[start:end]
                print("Chunks : {} of {} round {} s:e{}".format(
                    start, end, count, str(lst_product_code[0]) + ":" + str(lst_product_code[-1])))
                if count != 0:
                    time.sleep(60)
                k = 0
                while k < 1:
                    for product_code in lst_product_code:
                        url = 'https://www30.bhan.com.br:9443/api/v1/produto/saldoproduto'
                        balance_tasks.append(asyncio.ensure_future(
                            get_all_balance(session, url, headers, product_code, company_code, balance_code)))
                    try:
                        await asyncio.gather(*balance_tasks)
                        count += 1
                        k += 1
                    except Exception as e:
                        continue
        # Save a json file

asyncio.run(main())
Make an infinite loop:
import time

connection = None
while True:
    connection = try_connect()
    if connection:
        break  # Get out of the loop.
    time.sleep(2)  # Seconds.
do_something(connection)
EDIT:
def try_connect():
    try:
        connection = ...  # get connection.
    except [failed_connection_exception_type]:
        return False
    else:
        return connection
EDIT 2:
Short version
import time

connection = None
while True:
    try:
        connection = ...  # get connection.
    except [exception_type_to_catch]:
        time.sleep(2)  # Seconds.
        continue
    else:
        break
do_something(connection)
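Applied to the asker's async code, the same idea looks roughly like this (a sketch; make_tasks is a placeholder for rebuilding the coroutines, since a failed asyncio.gather cannot simply be awaited again with the same task objects):

import asyncio
import aiohttp

async def gather_with_retry(make_tasks, max_retries=5):
    # make_tasks() must build a fresh list of coroutines on every attempt;
    # re-gathering the old task list is likely why the retries in the
    # question finish instantly with the same exceptions.
    for attempt in range(max_retries):
        try:
            return await asyncio.gather(*make_tasks())
        except (aiohttp.ServerDisconnectedError, aiohttp.ServerTimeoutError):
            await asyncio.sleep(2 ** attempt)  # back off, then resend the chunk
    raise RuntimeError("chunk failed after {} retries".format(max_retries))

Note that in the original loop, balance_tasks is never cleared before the except/continue path, so the retry re-gathers the already-failed tasks instead of sending the chunk again.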

Is there a way to avoid Rate Limit Error in Twitter API in my python program?

I am trying to create a Twitter user graph and for that I have written the following code :
import operator
import sys
import time
from urllib.error import URLError
from http.client import BadStatusLine
import json
import twitter
from functools import partial
from sys import maxsize as maxint
import itertools
import networkx
import matplotlib.pyplot as plt

G = networkx.Graph()

# Code and function taken from the twitter cookbook
def oauth_login():
    CONSUMER_KEY = 'xxxx'
    CONSUMER_SECRET = 'xxZD6r'
    OAUTH_TOKEN = 'xxNRYl'
    OAUTH_TOKEN_SECRET = 'xxHYJl'
    auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    twitter_api = twitter.Twitter(auth=auth)
    return twitter_api

# Code and function taken from the twitter cookbook
def make_twitter_request(twitter_api_func, max_errors=10, *args, **kw):
    # A nested helper function that handles common HTTPErrors. Returns an updated
    # value for wait_period if the problem is a 500-level error. Blocks until the
    # rate limit is reset if it's a rate-limiting issue (429 error). Returns None
    # for 401 and 404 errors, which require special handling by the caller.
    def handle_twitter_http_error(e, wait_period=2, sleep_when_rate_limited=True):
        if wait_period > 3600:  # Seconds
            print('Too many retries. Quitting.', file=sys.stderr)
            raise e
        if e.e.code == 401:
            print('Encountered 401 Error (Not Authorized)', file=sys.stderr)
            return None
        elif e.e.code == 404:
            print('Encountered 404 Error (Not Found)', file=sys.stderr)
            return None
        elif e.e.code == 429:
            print('Encountered 429 Error (Rate Limit Exceeded)', file=sys.stderr)
            if sleep_when_rate_limited:
                print("Retrying in 15 minutes...ZzZ...", file=sys.stderr)
                sys.stderr.flush()
                time.sleep(60 * 15 + 5)
                print('...ZzZ...Awake now and trying again.', file=sys.stderr)
                return 2
            else:
                raise e  # Caller must handle the rate-limiting issue
        elif e.e.code in (500, 502, 503, 504):
            print('Encountered {0} Error. Retrying in {1} seconds'.format(e.e.code, wait_period), file=sys.stderr)
            time.sleep(wait_period)
            wait_period *= 1.5
            return wait_period
        else:
            raise e

    wait_period = 2
    error_count = 0
    while True:
        try:
            return twitter_api_func(*args, **kw)
        except twitter.api.TwitterHTTPError as e:
            error_count = 0
            wait_period = handle_twitter_http_error(e, wait_period)
            if wait_period is None:
                return
        except URLError as e:
            error_count += 1
            time.sleep(wait_period)
            wait_period *= 1.5
            print("URLError encountered. Continuing.", file=sys.stderr)
            if error_count > max_errors:
                print("Too many consecutive errors...bailing out.", file=sys.stderr)
                raise
        except BadStatusLine as e:
            error_count += 1
            time.sleep(wait_period)
            wait_period *= 1.5
            print("BadStatusLine encountered. Continuing.", file=sys.stderr)
            if error_count > max_errors:
                print("Too many consecutive errors...bailing out.", file=sys.stderr)
                raise

# Code and function taken from the twitter cookbook
def get_friends_followers_ids(twitter_api, screen_name=None, user_id=None,
                              friends_limit=maxint, followers_limit=maxint):
    # Must have either screen_name or user_id (logical xor)
    assert (screen_name is not None) != (user_id is not None), "Must have screen_name or user_id, but not both"
    # See https://developer.twitter.com/en/docs/twitter-api/v1/accounts-and-users/follow-search-get-users/api-reference/get-friends-ids
    # for details on API parameters
    get_friends_ids = partial(make_twitter_request, twitter_api.friends.ids, count=5000)
    get_followers_ids = partial(make_twitter_request, twitter_api.followers.ids, count=5000)
    friends_ids, followers_ids = [], []
    for twitter_api_func, limit, ids, label in [
        [get_friends_ids, friends_limit, friends_ids, "friends"],
        [get_followers_ids, followers_limit, followers_ids, "followers"]
    ]:
        if limit == 0:
            continue
        cursor = -1
        while cursor != 0:
            # Use make_twitter_request via the partially bound callable...
            if screen_name:
                response = twitter_api_func(screen_name=screen_name, cursor=cursor)
            else:  # user_id
                response = twitter_api_func(user_id=user_id, cursor=cursor)
            if response is not None:
                ids += response['ids']
                cursor = response['next_cursor']
            print('Fetched {0} total {1} ids for {2}'.format(len(ids), label, (user_id or screen_name)),
                  file=sys.stderr)
            # XXX: You may want to store data during each iteration to provide
            # an additional layer of protection from exceptional circumstances
            if len(ids) >= limit or response is None:
                break
    # Do something useful with the IDs, like store them to disk...
    return friends_ids[:friends_limit], followers_ids[:followers_limit]

# Code and function taken from the twitter cookbook
def get_user_profile(twitter_api, screen_names=None, user_ids=None):
    # Must have either screen_name or user_id (logical xor)
    assert (screen_names is not None) != (user_ids is not None)
    items_to_info = {}
    items = screen_names or user_ids
    while len(items) > 0:
        items_str = ','.join([str(item) for item in items[:100]])
        items = items[100:]
        if screen_names:
            response = make_twitter_request(twitter_api.users.lookup, screen_name=items_str)
        else:  # user_ids
            response = make_twitter_request(twitter_api.users.lookup, user_id=items_str)
        for user_info in response:
            if screen_names:
                items_to_info[user_info['screen_name']] = user_info
            else:  # user_ids
                items_to_info[user_info['id']] = user_info
    return items_to_info

# Function to find reciprocal friends and sort them so that we get the top 5 friends
def reciprocal_friends(twitter_api, screen_name=None, user_id=None):
    friends_list_ids, followers_list_ids = get_friends_followers_ids(twitter_api, screen_name=screen_name,
                                                                     user_id=user_id,
                                                                     friends_limit=5000, followers_limit=5000)
    friends_reciprocal = list(set(friends_list_ids) & set(followers_list_ids))
    list_followers_count = []
    user_profiles = {}
    for each in friends_reciprocal:
        user_profiles[each] = get_user_profile(twitter_api, user_ids=[each])[each]
        list_followers_count.append(user_profiles[each]['followers_count'])
    res = sorted(list_followers_count, reverse=True)
    friends_count = {user_profiles[fr]['followers_count']: fr for fr in friends_reciprocal}
    list_reciprocal = []
    if len(res) < 6:
        list_reciprocal = friends_reciprocal
    else:
        for i in range(5):
            list_reciprocal.append(friends_count[res[i]])
    return list_reciprocal

# This function finds reciprocal friends again and again until we reach at least 100 nodes
def crawler(twitter_api, screen_name=None, user_id=None):
    rec_friends = reciprocal_friends(twitter_api, screen_name=screen_name, user_id=user_id)
    edges = [(screen_name, x) for x in rec_friends]
    G.add_edges_from(edges)
    nodes = nxt_qu = rec_friends
    if len(nodes) == 0:
        print("No reciprocal friends")
        return rec_friends
    while G.number_of_nodes() < 101:
        print("Queue Items : ", nxt_qu)
        (queue, nxt_qu) = (nxt_qu, [])
        for q in queue:
            if G.number_of_nodes() >= 101:
                break
            print("ID Entered:", q)
            res = reciprocal_friends(twitter_api, screen_name=None, user_id=q)
            edges = [(q, z) for z in res]
            G.add_edges_from(edges)
            nxt_qu += res
            nodes += res
        print(nodes)
    # To plot the graph
    networkx.draw(G)
    plt.savefig("graphresult.png")
    plt.show()
    # Printing the output
    print("No. of Edges: ", G.number_of_edges())
    print("No. of Nodes: ", G.number_of_nodes())
    print("Diameter : ", networkx.diameter(G))
    print("Average Distance: ", networkx.average_shortest_path_length(G))
    # To write the output into a file
    f = open("output.txt", "w")
    f.write("No. of Nodes: " + str(G.number_of_nodes()))
    f.write("\nNo. of Edges: " + str(G.number_of_edges()))
    f.write("\nDiameter: " + str(networkx.diameter(G)))
    f.write("\nAverage Distance: " + str(networkx.average_shortest_path_length(G)))

twitter_api = oauth_login()
crawler(twitter_api, screen_name="POTUS")
However, I am getting this error often, and it makes my program run very slowly:
ID Entered: 60784269
Fetched 5000 total friends ids for 60784269
Fetched 5000 total followers ids for 60784269
Encountered 429 Error (Rate Limit Exceeded)
Retrying in 15 minutes...ZzZ...
Is there a way to get around this? Or to make the code run faster?
I have read a few documents but I still don't have a clear picture. Any help is appreciated.
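One observation on the "run faster" part, from the code itself: reciprocal_friends calls get_user_profile once per reciprocal friend, but users.lookup accepts up to 100 ids per call, and get_user_profile already chunks its input accordingly. Fetching all the profiles in one call cuts the number of requests, and therefore the rate-limit hits, dramatically. A sketch of that change:

# Inside reciprocal_friends, replace the per-id loop with one batched lookup:
user_profiles = get_user_profile(twitter_api, user_ids=friends_reciprocal)
list_followers_count = [profile['followers_count'] for profile in user_profiles.values()]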
There is no way to get around the rate-limit restrictions with the public API.
There is, however, an API v2 now, which also lets you look up users and does not count against the same rate limits.
https://developer.twitter.com/en/docs/twitter-api/users/lookup/introduction
Note that this solution may be temporary, as Twitter will at some point remove access to API v1.
You can ask Twitter for access to the premium/enterprise level of the API, but you will have to pay for it.
You can see the rate-limit documentation here:
API v1
API v2
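For completeness, a minimal sketch of a v2 user lookup via tweepy (assuming tweepy 4.x and a v2 bearer token; this is not part of the original question):

import tweepy

client = tweepy.Client(bearer_token="YOUR_BEARER_TOKEN")
resp = client.get_users(usernames=["POTUS"], user_fields=["public_metrics"])
for user in resp.data:
    print(user.username, user.public_metrics["followers_count"])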

Python: Nested while loop inside a for loop breaking unexpectedly without any error

I have a while loop nested within a for loop that runs over a JSON array collected from Firestore; it collects stock symbols to pass to another API, which gathers minute-by-minute trading data to put back into the Firestore DB.
While the loop is running, it stops unexpectedly, without any error, around the fourth or sixth (never more) time through the 389-entry while loop.
Any idea why this is? Is it something with my code? I noticed that if I changed the limit in the while loop from 389 down to 100, it worked through all the companies within the JSON array. But it won't get through more than four companies down the list if it's the full 389 entries.
Anyway, thanks for the help!
import requests
import json
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime

cred = credentials.Certificate("./serviceAccountKey.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
doc_ref1 = db.collection(u'Quiver').stream()
for doc in doc_ref1:
    symbol = doc.id
    api_url = "https://api.iextrading.com/1.0/stock/{}/chart/1d".format(symbol)
    query_url = api_url
    r = requests.get(query_url)
    if r.status_code != 200:
        print("Error:", r.status_code)
        continue
    if r.status_code == 404:  # note: unreachable, since the check above already skips every non-200 status
        print("Error:", r.status_code, symbol)
        continue
    json_stock = r.json()
    b = 0
    while b <= 100:
        try:
            date = json_stock[b]['date']
            minute = json_stock[b]['minute']
            label = json_stock[b]['label']
            high = json_stock[b]['high']
            low = json_stock[b]['low']
            average = json_stock[b]['average']
            volume = json_stock[b]['volume']
            notional = json_stock[b]['notional']
            numberOfTrades = json_stock[b]['numberOfTrades']
            marketHigh = json_stock[b]['marketHigh']
            marketLow = json_stock[b]['marketLow']
            marketAverage = json_stock[b]['marketAverage']
            marketVolume = json_stock[b]['marketVolume']
            marketNotional = json_stock[b]['marketNotional']
            marketNumberOfTrades = json_stock[b]['marketNumberOfTrades']
            open = json_stock[b]['open']  # note: shadows the built-in open()
            close = json_stock[b]['close']
            marketOpen = json_stock[b]['marketOpen']
            marketClose = json_stock[b]['marketClose']
            changeOverTime = json_stock[b]['changeOverTime']
            marketChangeOverTime = json_stock[b]['marketChangeOverTime']
            doc_ref = db.collection(u'dailies').document(u'{}-{}'.format(minute, symbol))
            doc_ref.set({
                u'date': u'{}'.format(date),
                u'minute': u'{}'.format(minute),
                u'label': u'{}'.format(label),
                u'high': u'{}'.format(high),
                u'average': u'{}'.format(average),
                u'notional': u'{}'.format(notional),
                u'number of trades': u'{}'.format(numberOfTrades),
                u'market high': u'{}'.format(marketHigh),
                u'market low': u'{}'.format(marketLow),
                u'market average': u'{}'.format(marketAverage),
                u'market volume': u'{}'.format(marketVolume),
                u'market notional': u'{}'.format(marketNotional),
                u'market number of trades': u'{}'.format(marketNumberOfTrades),
                u'open': u'{}'.format(open),
                u'close': u'{}'.format(close),
                u'market open': u'{}'.format(marketOpen),
                u'market close': u'{}'.format(marketClose),
                u'change over time': u'{}'.format(changeOverTime),
                u'market change over time': u'{}'.format(marketChangeOverTime)
            })
            print("{} {}: {}".format(symbol, minute, b))
            b += 1
        except IndexError:
            print("Index Error")
            break
You can use:
except Exception as errmsg:
    print(errmsg)
to get more information about what is actually failing.
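Concretely, assuming the same json_stock loop from the question, printing the full traceback will show why the loop dies (a missing key, a Firestore error, etc.) instead of failing silently:

import traceback

b = 0
while b <= 100:
    try:
        row = json_stock[b]  # same access pattern as in the question
        # ... process row and write it to Firestore ...
        b += 1
    except IndexError:
        break  # ran past the end of json_stock; expected for short sessions
    except Exception:
        traceback.print_exc()  # reveals the real reason the loop stops
        break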

Instancemethod object is not iterable (AirPi) (Python)

I got ERROR: Exception during output: 'instancemethod' object is not iterable when debugging this AirPi code from https://github.com/haydnw/AirPi/blob/master/outputs/ubidots.py
This is supposed to upload my sensor data to the Ubidots server.
*I'd put my correct token and variable ID inside the configuration file for this AirPi.
requiredSpecificParams = ["token"]
optionalSpecificParams = ["showcost",
                          "ID-BMP085-temp",
                          "ID-BMP085-pres",
                          "ID-DHT22-hum",
                          "ID-DHT22-temp",
                          "ID-LDR",
                          "ID-TGS2600",
                          "ID-MiCS-2710",
                          "ID-MiCS-5525",
                          "ID-Microphone",
                          "ID-Raingauge"
                          ]

def __init__(self, config):
    super(Ubidots, self).__init__(config)
    self.token = self.params["token"]
    if "showcost" in self.params:
        self.showcost = self.params["showcost"]
    else:
        self.showcost = False
    self.ubivariables = {}
    for key, value in self.params.iteritems():
        if key[:3] == "ID-":
            if value:
                self.ubivariables[key[3:]] = value

def output_data(self, datapoints, dummy):
    """Output data.

    Output data in the format stipulated by the plugin. Calibration
    is carried out first if required. Because this particular plugin
    (ubidots) does not show time, the third argument (normally called
    'sampletime') is called 'dummy' to facilitate compliance with pylint.

    Args:
        self: self.
        datapoints: A dict containing the data to be output.
        dummy: datetime representing the time the sample was taken.

    Returns:
        boolean True if data successfully output to Ubidots; False if not.
    """
    if self.params["calibration"]:
        datapoints = self.cal.calibrate(datapoints)
    payload = []
    for point in datapoints:
        for ubivariablename, ubivariableid in self.ubivariables.iteritems():
            if point["sensor"] == ubivariablename:
                if point["value"] is not None:
                    thisvalue = {}
                    thisvalue["variable"] = ubivariableid
                    thisvalue["value"] = point["value"]
                    payload.append(thisvalue)
                break
    headers = {'Accept': 'application/json; indent=4', 'Content-Type': 'application/json', 'X-Auth-Token': self.token}
    url = "http://things.ubidots.com/api/v1.6/collections/values"
    req = None
    cost = 0
    try:
        req = requests.post(url, data=json.dumps(payload), headers=headers)
    except Exception, e:  # Python 2 syntax, as in the original source
        print("ERROR: Failed to contact the Ubidots service.")
        print("ERROR: " + str(e))
        return False
    for response in req.json:  # this line raises the error: json is a method, not a list
        if response["status_code"] is not 201:
            print("ERROR: Ubidots responded with an error for one of the values.")
            return False
        else:
            cost += 1
    if self.showcost:
        print("Ubidots upload cost " + str(cost) + " dots.")
    return True
for response in req.json:
According to the documentation, json is a method and must be called, so this should be:
for response in req.json():
In the future, it is helpful to include just as much of your code as is necessary to reproduce the problem, and to include the complete error message with the traceback.
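For illustration, in the requests library json is a method on the response object, so it has to be called to get the parsed body:

import requests

resp = requests.get("https://api.github.com")
print(type(resp.json))  # a bound method - iterating over it raises the original error
data = resp.json()      # calling it returns the parsed JSON (a dict here)
print(data.keys())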
