I am making a Python script using the API of a free test automation website called TestProject.
Link to their API: https://api.testproject.io/docs/v2/
Basically, what I want to do is grab the PDF reports of all tests and save them somewhere.
But to make the GET request that does that, I first need a projectID and a jobID, and I have already written functions that get them and save them in lists.
But now I have a problem: the script loops through both lists independently, so it does not pair the correct projectID with the correct jobID and throws errors because that combination does not exist.
So what I need is a way to check which jobIDs belong to which projectID, so that I can make a GET request to get all the executionIDs and then the PDF of the report.
I am kind of new to programming, so I would love any help I can get. If anyone has a better solution, please feel free to let me know.
My script:
import requests
import json
import csv
from datetime import datetime
from jsonpath_ng import jsonpath, parse
API_key = 'api_key'
headers = {'Authorization':'{}'.format(API_key)}
list_projectId = []
list_jobId = []
list_executionId = []
ParseData_projectId = parse('$..id')
ParseData_jobId = parse('$..id')
ParseData_executionId = parse('$..id')
def parsing(response, ParseData, list_data):
    # parses data and appends it to the list
    Data = json.loads(response)
    Parsaj = ParseData
    Podatki = Parsaj.find(Data)
    for i in range(0, len(Podatki)):
        vrednost = Podatki[i].value
        list_data.append(vrednost)

def projectId():
    # gets all projectId's and saves them in list_projectId
    url = 'https://api.testproject.io/v2/projects?_start=0'
    response = requests.get(url, headers=headers)
    response_json = response.json()
    converted = json.dumps(response_json)
    parsing(converted, ParseData_projectId, list_projectId)

def jobId():
    # gets all jobId's and saves them in list_jobId
    for i in range(0, len(list_projectId)):
        id = list_projectId[i]
        url = 'https://api.testproject.io/v2/projects/{}'.format(id) + '/jobs?onlyScheduled=false&_start=0'
        response = requests.get(url, headers=headers)
        response_json = response.json()
        converted = json.dumps(response_json)
        parsing(converted, ParseData_jobId, list_jobId)

def executionId():
    # Their API link:
    # https://api.testproject.io/v2/projects/{projectId}/jobs/{jobId}/reports?_start=0
    # the for loop below does not work; this is where I need the help:
    for i in range(0, len(list_projectId)):
        project_id = list_projectId[i]
        job_id = list_jobId[i]
        url = 'https://api.testproject.io/v2/projects/{}'.format(project_id) + '/jobs/{}'.format(job_id) + '/reports?_start=0'
        response = requests.get(url, headers=headers)
        response_json = response.json()
        converted = json.dumps(response_json)
        parsing(converted, ParseData_executionId, list_executionId)
projectId()
print("----------LIST PROJECT ID: ----------")
print(list_projectId)
print("")
jobId()
print("----------LIST JOB ID: ----------")
print(list_jobId)
executionId()
print("----------LIST EXECUTION ID: ----------")
print(list_executionId)
You have to use the 'in' operator to check whether a value exists in a list.
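For this particular script, though, a dictionary keyed by project ID avoids the membership check entirely: collect the job IDs per project, then build each reports URL only from pairs that actually belong together. A minimal sketch reusing the helpers from the question (not tested against the TestProject API):

jobs_by_project = {}

def jobId():
    # gets all jobId's, grouped by the project they were fetched for
    for project_id in list_projectId:
        url = 'https://api.testproject.io/v2/projects/{}/jobs?onlyScheduled=false&_start=0'.format(project_id)
        response = requests.get(url, headers=headers)
        job_ids = []
        parsing(json.dumps(response.json()), ParseData_jobId, job_ids)
        jobs_by_project[project_id] = job_ids

def executionId():
    # only requests reports for (projectId, jobId) pairs that really exist
    for project_id, job_ids in jobs_by_project.items():
        for job_id in job_ids:
            url = 'https://api.testproject.io/v2/projects/{}/jobs/{}/reports?_start=0'.format(project_id, job_id)
            response = requests.get(url, headers=headers)
            parsing(json.dumps(response.json()), ParseData_executionId, list_executionId)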
So here is what I am trying to do: I am trying to get my Twitter bot to give maths answers to users using the WolframAlpha API.
Here is the problem I am facing:
people will mention my Twitter username to activate the bot, for example: #twitterusername 2+2
WolframAlpha will take the whole input, #twitterusername 2+2, as the query, which gives me an error. I want it to ignore the username.
Here is my code:
def respondToTweet(file='tweet_ID.txt'):
    last_id = get_last_tweet(file)
    mentions = api.mentions_timeline(last_id, tweet_mode='extended')
    if len(mentions) == 0:
        return
    new_id = 0
    logger.info("someone mentioned me...")
    for mention in reversed(mentions):
        logger.info(str(mention.id) + '-' + mention.full_text)
        new_id = mention.id
        status = api.get_status(mention.id)
        if '#Saketltd01' in mention.full_text.lower():
            logger.info("Responding back with QOD to -{}".format(mention.id))
            client = wolframalpha.Client(app_id)
            query = mention.full_text.lower()
            rest = client.query(query)
            answer = next(rest.results).text
            Wallpaper.get_wallpaper(answer)
            media = api.media_upload("created_image.png")
            logger.info("liking and replying to tweet")
            api.create_favorite(mention.id)
            api.update_status('#' + mention.user.screen_name, mention.id,
                              media_ids=[media.media_id])
    put_last_tweet(file, new_id)

def main():
    respondToTweet()
When you take the whole input, remember to strip it down by removing your username from the actual input string, and then perform the mathematical operation on what is left:
myUsername = "#my_username"
# note: str.lstrip() strips a set of characters, not a prefix, so use replace() instead
equation = userInput.replace(myUsername, "", 1).strip()
perform_desired_operation_on(equation)  # user-defined function
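Applied to the bot above, that cleanup would happen just before the WolframAlpha call. A small sketch, assuming the same mention objects and wolframalpha client as in the question (the helper name here is made up):

BOT_MENTION = '#saketltd01'  # the handle the question checks for, lowercased

def extract_query(full_text):
    # drop every token that is the bot's own mention and keep the rest as the maths query
    words = [w for w in full_text.lower().split() if w != BOT_MENTION]
    return ' '.join(words)

# inside the loop, instead of querying the full text:
#     query = extract_query(mention.full_text)
#     rest = client.query(query)
#     answer = next(rest.results).text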
I want to know if there is some way through which I can obtain all the intents (and their corresponding questions), entities, and the training data I defined in Google Dialogflow programmatically using Python.
Following is the code (works fine) through which I get a response from Dialogflow.
GOOGLE_APPLICATION_CREDENTIALS = '###'
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = GOOGLE_APPLICATION_CREDENTIALS

def dialogflow_api_response(text):
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path('###', '##')
    text_input = dialogflow.types.TextInput(text=text, language_code='en')
    query_input = dialogflow.types.QueryInput(text=text_input)
    response = session_client.detect_intent(session=session, query_input=query_input)
    jsonObj = MessageToJson(response)
    x = json.loads(jsonObj)
If there is, please point me in the right direction.
Thank you
Yes, it is possible to get all the intents, entities, user entities and contexts from a Dialogflow agent as JSON objects using the Dialogflow API. Here's the code to get the list of intents:
def list_intents(project_id):
    import dialogflow_v2 as dialogflow
    intents_client = dialogflow.IntentsClient()
    parent = intents_client.project_agent_path(project_id)
    intents = intents_client.list_intents(parent)
    for intent in intents:
        print('=' * 20)
        print('Intent name: {}'.format(intent.name))
        print('Intent display_name: {}'.format(intent.display_name))
        print('Action: {}\n'.format(intent.action))
        print('Root followup intent: {}'.format(
            intent.root_followup_intent_name))
        print('Parent followup intent: {}\n'.format(
            intent.parent_followup_intent_name))
        print('Input contexts:')
        for input_context_name in intent.input_context_names:
            print('\tName: {}'.format(input_context_name))
        print('Output contexts:')
        for output_context in intent.output_contexts:
            print('\tName: {}'.format(output_context.name))
        for training_phrase in intent.training_phrases:
            train_phrase = training_phrase.parts[0].text
            print(train_phrase)
For further information you can refer to the official Dialogflow Python samples on GitHub.
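Entity types can be listed the same way. A minimal sketch, assuming the same dialogflow_v2 client library and project_id (this part is not from the original answer):

def list_entity_types(project_id):
    import dialogflow_v2 as dialogflow
    entity_types_client = dialogflow.EntityTypesClient()
    parent = entity_types_client.project_agent_path(project_id)
    # each entity type carries the entries (value + synonyms) defined in the agent
    for entity_type in entity_types_client.list_entity_types(parent):
        print('Entity type: {}'.format(entity_type.display_name))
        for entity in entity_type.entities:
            print('\t{} -> {}'.format(entity.value, list(entity.synonyms)))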
I'm struggling to get a Lambda function working. I have a Python script that accesses the Twitter API, pulls information, and exports that information into an Excel sheet. I'm trying to transfer the script over to AWS Lambda, and I'm having a lot of trouble.
What I've done so far: created an AWS account, set up S3 with a bucket, and poked around trying to get things to work.
I think the main area I'm struggling with is how to go from a Python script that I execute via the local CLI to lambda-capable code. I'm not sure I understand how the lambda_handler function works, what the event or context arguments actually mean (despite watching half a dozen tutorial videos), or how to integrate my existing functions into Lambda in the context of the lambda_handler. I'm just very confused and hoping someone might be able to help me get some clarity!
Code that I'm using to pull Twitter data (just a sample):
import time
import datetime
import keys
import pandas as pd
from twython import Twython, TwythonError
import pymysql
def lambda_handler(event, context):
    def oauth_authenticate():
        twitter_oauth = Twython(keys.APP_KEY, keys.APP_SECRET, oauth_version=2)
        ACCESS_TOKEN = twitter_oauth.obtain_access_token()
        twitter = Twython(keys.APP_KEY, access_token = ACCESS_TOKEN)
        return twitter

    def get_username():
        """
        Prompts for the screen name of targetted account
        """
        username = input("Enter the Twitter screenname you'd like information on. Do not include '#':")
        return username

    def get_user_followers(username):
        """
        Returns data on all accounts following the targetted user.
        WARNING: The number of followers can be huge, and the data isn't very valuable
        """
        #username = get_username()
        #import pdb; pdb.set_trace()
        twitter = oauth_authenticate()
        datestamp = str(datetime.datetime.now().strftime("%Y-%m-%d"))
        target = twitter.lookup_user(screen_name = username)
        for y in target:
            target_id = y['id_str']
        next_cursor = -1
        index = 0
        followersdata = {}
        while next_cursor:
            try:
                get_followers = twitter.get_followers_list(screen_name = username,
                                                           count = 200,
                                                           cursor = next_cursor)
                for x in get_followers['users']:
                    followersdata[index] = {}
                    followersdata[index]['screen_name'] = x['screen_name']
                    followersdata[index]['id_str'] = x['id_str']
                    followersdata[index]['name'] = x['name']
                    followersdata[index]['description'] = x['description']
                    followersdata[index]['date_checked'] = datestamp
                    followersdata[index]['targeted_account_id'] = target_id
                    index = index + 1
                next_cursor = get_followers["next_cursor"]
            except TwythonError as e:
                print(e)
                remainder = (float(twitter.get_lastfunction_header(header = 'x-rate-limit-reset')) \
                             - time.time())+1
                print("Rate limit exceeded. Waiting for:", remainder/60, "minutes")
                print("Current Time is:", time.strftime("%I:%M:%S"))
                del twitter
                time.sleep(remainder)
                twitter = oauth_authenticate()
                continue
        followersDF = pd.DataFrame.from_dict(followersdata, orient = "index")
        followersDF.to_excel("%s-%s-follower list.xlsx" % (username, datestamp),
                             index = False, encoding = 'utf-8')
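For reference, the handler is just the entry point Lambda invokes: event is the JSON payload from whatever triggered the function, and context carries runtime metadata. A minimal sketch of how the existing code could be wired in if get_user_followers were defined at module level instead of inside the handler; the "username" key in the event is an assumption, not something from the question:

def lambda_handler(event, context):
    # event: the trigger's JSON payload (assumed here to carry a "username" key)
    # context: runtime info such as the function name, memory limit and remaining time
    username = event.get("username", "twitter")   # hypothetical input field
    get_user_followers(username)                  # the function from the script above
    return {"statusCode": 200, "body": "done"}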
I need some help implementing a Python app that accesses the QuickBooks API. I have successfully written several apps that use APIs, but once we get into the OAuth world, I get a bit lost.
At any rate, I found the quickbooks-python wrapper here:
https://github.com/troolee/quickbooks-python
but there are zero examples of working code showing how to implement it properly. I imagine that a more experienced Python programmer could figure out how to make this work without any instructions, but it seems like I'm missing the basics.
If I could get it connected, I could probably get it to work from there...
It seems like the documentation on GitHub jumps around, and for a more experienced programmer it would probably make perfect sense. But I'm just not following...
from quickbooks import *

consumerKey = "fromApiConsole"
consumerSecret = "fromApiConsole"
callbackUrl = "https://quickbooks.api.intuit.com/v3"

qbObject = QuickBooks(
    consumer_key = consumerKey,
    consumer_secret = consumerSecret,
    callback_url = callbackUrl
)

authorize_url = qbObject.get_authorize_url() # will create a service, and further set up the qbObject.

oauth_token = request.GET['oauth_token']
oauth_verifier = request.GET['oauth_verifier']
realm_id = request.GET['realmId']

session = qbObject.get_access_tokens(oauth_verifier)

# say you want access to the reports
reportType = "ProfitAndLoss"
url = "https://quickbooks.api.intuit.com/v3/company/asdfasdfas/"
url += "reports/%s" % reportType

r = session.request( # This is just a Rauth request
    "POST",
    url,
    header_auth = True,
    realm = realm_id,
    params = {"format": "json"}
)

qb = QuickBooks(
    consumer_key = consumerKey,
    consumer_secret = consumerSecret,
    access_token = qbtoken.access_token, # the stored token
    access_token_secret = qbtoken.access_token_secret, # the stored secret
    company_id = qbtoken.realm_id # the stored realm_id
)

qbText = str(qb.query_objects(business_object, params, query_tail))
print qbText
I am pretty sure that I am:
importing the wrong modules/classes
missing huge pieces of code to "glue together" the samples found on GitHub
not using Django here; I know the request class above is from Django, but I'd really like to make this work as a plain Python script without Django
not getting the token/identifier/realmId from the initial authorize_url function; it prints on the screen, but I'm not sure how to grab it
The end goal here is really just to connect and get a P&L statement from QuickBooks Online. If I can get that far, I am sure I can get the rest of what I need out of the API. I don't really need to CHANGE anything; I'm just looking to include data from the reports in some dashboards.
* UPDATE *
Okay, I figured out how to get it to connect, but I'm not sure how to get to the reports.
The answer was this, which was on the prior API page:
Accessing the API
Once you've gotten a hold of your QuickBooks access tokens, you can create a QB object:
qb = QuickBooks(consumer_key = QB_OAUTH_CONSUMER_KEY,
                consumer_secret = QB_OAUTH_CONSUMER_SECRET,
                access_token = QB_ACCESS_TOKEN,
                access_token_secret = QB_ACCESS_TOKEN_SECRET,
                company_id = QB_REALM_ID
                )
Still trying to get the basic reports...
Okay, so here's how to make this work. I'm focused on the reports, so here's how you can get reports from the QuickBooks Online API using Python:
1) Go to https://github.com/finoptimal-dev/quickbooks-python and download the code
2) Make sure you have rauth installed; it is available from PyPI:
pip install rauth
3) Edit the quickbooks2.py file and add the following to the END:
qb = QuickBooks(consumer_key = QB_OAUTH_CONSUMER_KEY,
                consumer_secret = QB_OAUTH_CONSUMER_SECRET,
                access_token = QB_ACCESS_TOKEN,
                access_token_secret = QB_ACCESS_TOKEN_SECRET,
                company_id = QB_REALM_ID
                )
4) Set up a sandbox application on the QuickBooks site here: https://developer.intuit.com/v2/ui#/app/startcreate (you will have to create a developer account if you don't already have one)
5) Once set up, you can go to the "Keys" tab of the app and grab the App Token, OAuth Consumer Key and OAuth Consumer Secret.
6) Go to the Intuit Developer Playground at https://appcenter.intuit.com/Playground/OAuth/IA and use the info from step #5 to obtain the Access Token and Access Token Secret.
7) Change the variables in step #3 to the correct values. For QB_REALM_ID, this is the Company ID. You can get this in the sandbox by logging into https://developer.intuit.com/v2/ui#/sandbox and looking for Company ID.
8) Add the following code below the code from step #3 above:
print qb.get_report('ProfitAndLoss','summarize_column_by=Month&start_date=2014-01-01&end_date=2014-12-31')
I use the above dates because the QuickBooks Sandbox company has no Income/Expense data in 2015, so you have to pick dates in 2014.
9) IMPORTANT: To use the QuickBooks Sandbox for reporting purposes, you need to change the get_report() function to use base_url_v3 instead of being hard-coded to the production URL.
Look for a line in the get_report() function that looks like this:
url = "https://quickbooks.api.intuit.com/v3/company/%s/" % \
and change it to this:
url = self.base_url_v3 + "/company/%s/" % \
10) Now you can change base_url_v3 all the way at the top to this:
base_url_v3 = "https://sandbox-quickbooks.api.intuit.com/v3"
11) And now you should be able to run:
python quickbooks2.py
You should see a bunch of JSON data from the QuickBooks Sandbox company.
12) You can explore a bit to test out the appropriate URLs here: https://developer.intuit.com/apiexplorer?apiname=V3QBO#Reports
13) The report reference is here: https://developer.intuit.com/docs/0100_accounting/0400_references/reports and it shows you which parameters you can use. To test parameters in the Explorer, you enter them in the "Request Body" section.
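For instance, building on the get_report() call from step 8, a Balance Sheet for the same period might look like the line below. The report name and parameters come from that reports reference, so double-check them there rather than taking this sketch as authoritative:
print qb.get_report('BalanceSheet', 'start_date=2014-01-01&end_date=2014-12-31&accounting_method=Accrual')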
I struggled with this for a while and finally figured it out. Hope this helps someone else.
I do not have much experience with Python, but someone shared this code with me for OAuth earlier. If you have additional questions about the code, I will not be able to answer them.
NOTE: The code below also makes calls to the V2 QBO APIs. Please do not use that part, as it is deprecated.
See if it helps:
from rauth import OAuth1Session, OAuth1Service
import xml.etree.ElementTree as ET
import xmltodict

class QuickBooks():
    """A wrapper class around Python's Rauth module for the QuickBooks API"""

    access_token = ''
    access_token_secret = ''
    consumer_key = ''
    consumer_secret = ''
    company_id = 0
    callback_url = ''
    session = None

    base_url_v3 = "https://quickbooks.api.intuit.com/v3"
    base_url_v2 = "https://qbo.intuit.com/qbo1"

    request_token_url = "https://oauth.intuit.com/oauth/v1/get_request_token"
    access_token_url = "https://oauth.intuit.com/oauth/v1/get_access_token"
    authorize_url = "https://appcenter.intuit.com/Connect/Begin"

    # Things needed for authentication
    qbService = None
    request_token = ''
    request_token_secret = ''

    def __init__(self, **args):
        if 'consumer_key' in args:
            self.consumer_key = args['consumer_key']
        if 'consumer_secret' in args:
            self.consumer_secret = args['consumer_secret']
        if 'access_token' in args:
            self.access_token = args['access_token']
        if 'access_token_secret' in args:
            self.access_token_secret = args['access_token_secret']
        if 'company_id' in args:
            self.company_id = args['company_id']
        if 'callback_url' in args:
            self.callback_url = args['callback_url']

    def get_authorize_url(self):
        """Returns the Authorize URL as returned by QB,
        and specified by OAuth 1.0a.
        :return URI:
        """
        self.qbService = OAuth1Service(
            name = None,
            consumer_key = self.consumer_key,
            consumer_secret = self.consumer_secret,
            request_token_url = self.request_token_url,
            access_token_url = self.access_token_url,
            authorize_url = self.authorize_url,
            base_url = None
        )
        self.request_token, self.request_token_secret = self.qbService.get_request_token(
            params={'oauth_callback':self.callback_url}
        )
        return self.qbService.get_authorize_url(self.request_token)

    def get_access_tokens(self, oauth_verifier):
        """Wrapper around get_auth_session, returns session, and sets
        access_token and access_token_secret on the QB Object.
        :param oauth_verifier: the oauth_verifier as specified by OAuth 1.0a
        """
        session = self.qbService.get_auth_session(
            self.request_token,
            self.request_token_secret,
            data={'oauth_verifier': oauth_verifier})
        self.access_token = session.access_token
        self.access_token_secret = session.access_token_secret
        return session

    def create_session(self):
        if self.consumer_secret and self.consumer_key and self.access_token_secret and self.access_token:
            # print "hi"
            session = OAuth1Session(self.consumer_key,
                                    self.consumer_secret,
                                    self.access_token,
                                    self.access_token_secret,
                                    )
            # print session
            self.session = session
        else:
            pass
            #TODO: raise an error
        return self.session

    def keep_trying(self, r_type, url, header_auth, realm, payload=''):
        if self.session != None:
            session = self.session
        else:
            session = self.create_session()
            self.session = session
        trying = True
        tries = 0
        while trying:
            print url
            tries += 1
            if "v2" in url:
                r = session.request(r_type, url, header_auth, realm, data=payload)
                r_dict = xmltodict.parse(r.text)
                # print "DICT", r_dict
                if "FaultInfo" not in r_dict or tries > 4:
                    trying = False
            else:
                # url = "https://qb.sbfinance.intuit.com/v3/company/184010684/query?query=SELECT * FROM JournalEntry"
                # url = "https://quickbooks.api.intuit.com/v3/company/184010684/journalentry/24772"
                # url = "https://quickbooks.api.intuit.com/v3/company/184010684/query?query='SELECT+*+FROM+JournalEntry'"
                # url = "https://qb.sbfinance.intuit.com/v3/company/184010684/query?query=SELECT%20%2A%20FROM%20JournalEntry&"
                print url, r_type
                headers = {'Accept': 'application/json'}
                r = session.request(r_type, url, header_auth, realm, headers = headers)
                # r.headers
                print "\n\n INITIAL TEXT \n\n", r.text
                print "request headers:", r.request.headers
                print "request URL:", r.request.url
                print "response headers:", r.headers
                r_dict = r.text
                if "Fault" not in r_dict or tries > 4:
                    trying = False
                    r_dict = []
        return r_dict

    def fetch_customer(self, pk):
        if pk:
            url = self.base_url_v2 + "/resource/customer/v2/%s/%s" % ( self.company_id, pk)
            r_dict = self.keep_trying("GET", url, True, self.company_id)
            return r_dict['Customer']

    def fetch_customers(self, all=False, page_num=0, limit=10):
        if self.session != None:
            session = self.session
        else:
            session = self.create_session()
            self.session = session
        # We use v2 of the API, because what the fuck, v3.
        url = self.base_url_v2
        url += "/resource/customers/v2/%s" % (self.company_id)
        customers = []
        if all:
            counter = 1
            more = True
            while more:
                payload = {
                    "ResultsPerPage":30,
                    "PageNum":counter,
                }
                trying = True
                # Because the QB API is so iffy, let's try until we get a non-error
                # Rewrite this to use same code as above.
                while trying:
                    r = session.request("POST", url, header_auth = True, data = payload, realm = self.company_id)
                    root = ET.fromstring(r.text)
                    if root[1].tag != "{http://www.intuit.com/sb/cdm/baseexceptionmodel/xsd}ErrorCode":
                        trying = False
                    else:
                        print "Failed"
                session.close()
                qb_name = "{http://www.intuit.com/sb/cdm/v2}"
                for child in root:
                    # print child.tag, child.text
                    if child.tag == "{http://www.intuit.com/sb/cdm/qbo}Count":
                        if int(child.text) < 30:
                            more = False
                            print "Found all customers"
                    if child.tag == "{http://www.intuit.com/sb/cdm/qbo}CdmCollections":
                        for customer in child:
                            customers += [xmltodict.parse(ET.tostring(customer))]
                counter += 1
                # more = False
            # print more
        else:
            payload = {
                "ResultsPerPage":str(limit),
                "PageNum":str(page_num),
            }
            r = session.request("POST", url, header_auth = True, data = payload, realm = self.company_id)
            root = ET.fromstring(r.text)
            #TODO: parse for all customers
        return customers

    def fetch_sales_term(self, pk):
        if pk:
            url = self.base_url_v2 + "/resource/sales-term/v2/%s/%s" % ( self.company_id, pk)
            r_dict = self.keep_trying("GET", url, True, self.company_id)
            return r_dict

    def fetch_invoices(self, **args):
        if "query" in args:
            payload = ""
            if "customer" in args['query']:
                payload = {
                    "Filter":"CustomerId :Equals: %s" % (args['query']['customer'])
                }
            # while more:
            url = self.base_url_v2 + "/resource/invoices/v2/%s/" % (self.company_id)
            r_dict = self.keep_trying("POST", url, True, self.company_id, payload)
            invoices = r_dict['qbo:SearchResults']['qbo:CdmCollections']['Invoice']
            return invoices
        elif "pk" in args:
            # TODO: Not tested
            url = self.base_url_v2 + "/resource/invoice/v2/%s/%s" % ( self.company_id, args['pk'])
            r_dict = self.keep_trying("GET", url, True, self.company_id)
            return r_dict
        else:
            url = self.base_url_v2 + "/resource/invoices/v2/%s/" % (self.company_id)
            r_dict = self.keep_trying("POST", url, True, self.company_id, payload)
            return "BLAH"

    def fetch_journal_entries(self, **args):
        """ Because of the beautiful way that journal entries are organized
        with QB, you're still going to have to filter these results for the
        actual entity you're interested in. Luckily it only returns the entries
        that are relevant to your search
        :param query: a dictionary that includes 'customer', and the QB id of the
        customer
        """
        if "query" in args:
            payload = {}
            more = True
            counter = 1
            journal_entries = []
            if "customer" in args['query']:
                payload = {
                    "Filter":"CustomerId :Equals: %s" % (args['query']['customer'])
                }
            # payload = {
            # "query":"SELECT * FROM JournalEntry",
            # }
            while more:
                payload["ResultsPerPage"] = 30
                payload["PageNum"] = counter
                # url = self.base_url_v2 + "/resource/journal-entries/v2/%s/" % (self.company_id)
                # url = self.base_url_v3 + "/company/%s/query" % (self.company_id)
                url = "https://qb.sbfinance.intuit.com/v3/company/184010684/query?query=SELECT%20%2A%20FROM%20JournalEntry&"
                r_dict = self.keep_trying("GET", url, True, self.company_id, payload)
                more = False
                # print r_dict['qbo:SearchResults']['qbo:Count']
                counter = counter + 1
                # if int(r_dict['qbo:SearchResults']['qbo:Count']) < 30:
                # more = False
                # journal_entry_set = r_dict['qbo:SearchResults']['qbo:CdmCollections']['JournalEntry']
                # journal_entries += [journal_entry_set]
            return []
            # return r_dict['qbo:SearchResults']['qbo:CdmCollections']['JournalEntry']
        elif "pk" in args:
            # TODO: Not Tested
            url = self.base_url_v2 + "/resource/journal-entry/v2/%s/%s" % ( self.company_id, args['pk'])
            r_dict = self.keep_trying("GET", url, True, self.company_id)
            return r_dict
        else:
            url = self.base_url_v2 + "/resource/journal-entries/v2/%s/" % (self.company_id)
            r_dict = self.keep_trying("POST", url, True, self.company_id)
            print r_dict
            return "BLAH"