Accessing an uploaded file's URL - Python

My application sends and receives data to/from my phone - basically a two-way communication through the Pushbullet API.
I am trying to take a file from my phone and, once it's uploaded, do something with it (for example, play it if it's an audio file).
But when I upload the file on my phone, then list the pushes on my computer and get that exact push, the file URL is restricted.
I get the following XML error response with "Access Denied" as the message:
403: Forbidden
How would I approach this?
Here is the code for the application:
import time
import pushbullet
from urllib.request import urlopen
from playsound import playsound

def play_sound(url):
    # open the URL and write its contents into a local file
    with open("play.mp3", 'wb') as f:
        f.write(urlopen(url).read())
    # play the file through the playsound library
    playsound("play.mp3", False)
pb = pushbullet.Pushbullet(API_KEY, ENCRYPTION_PASSWORD)
pushes = pb.get_pushes()
past_pushes = len(pushes)

while True:
    time.sleep(3)
    # checks for new pushes on the phone and then scans them for commands
    pushes = pb.get_pushes()
    number_pushes = len(pushes) - past_pushes
    if number_pushes != 0:
        past_pushes = (len(pushes) - number_pushes)
        try:
            for i in range(number_pushes):
                push = pushes[i]
                push_body = push.get("body")
                if push_body is not None:
                    play = False
                    if push_body == "play":
                        play = True
                    elif play:
                        # only runs if the user has asked to play something
                        # beforehand
                        play = False
                        url = push.get('file_url')
                        # play sound from url
                        # this is where I get my 403: Forbidden error
                        if url is not None and ".mp3" in url:
                            play_sound(url)
        except Exception as e:
            print(e)

From the docs...
To authenticate for the API, use your access token in a header like Access-Token: <your_access_token_here>.
You're using urlopen(url) without any header information, so the request is denied.
So, try something like the following:
from urllib.request import Request, urlopen

req = Request('https://dl.pushbulletusercontent.com/...')
req.add_header('Access-Token', '<your token here>')
content = urlopen(req).read()

with open('sound.mp3', 'wb') as f:
    f.write(content)
Reference: How do I set headers using python's urllib?
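If you want to fold that header into the original play_sound helper, a minimal sketch (assuming API_KEY holds the same Pushbullet access token passed to pushbullet.Pushbullet, and url is the push's file_url) could look like this:

from urllib.request import Request, urlopen
from playsound import playsound

def play_sound(url):
    # attach the Pushbullet access token so the download isn't rejected with 403
    req = Request(url)
    req.add_header('Access-Token', API_KEY)  # API_KEY: your Pushbullet token, assumed defined elsewhere
    with open("play.mp3", 'wb') as f:
        f.write(urlopen(req).read())
    # play the downloaded file through the playsound library
    playsound("play.mp3", False)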


Telegram Bot not responding after 3rd function call

The issue is that the third function never seems to respond.
I haven't been able to find a reason why this happens in the Telegram documentation.
Please let me know if you have had this issue, or have seen it and know the solution.
Even a post that references an issue like this would help.
Thank you so much for the assistance.
from email import message
import os
import re
import html
import json
import telebot
import requests
import http.client
from pytube import *
from dotenv import load_dotenv

load_dotenv()

# Creating, hiding, and using API keys
API_KEY = os.getenv("API_KEY")
RAPID_KEY = os.getenv("RAPID_API")
bot = telebot.TeleBot(API_KEY)

@bot.message_handler(commands="start")
# Creating a help message for guidance on how to use bot.
def help(message):
    # Trying to send help message, if unable to send, throw an error message for the user.
    try:
        bot.send_message(message.chat.id, "Use \"Youtube\" and the video name to search for a video.\n")
    except:
        bot.send_message(message.chat.id, "There was an error fetching help, the bot may be offline.\n")

# Checking data and seeing if the word "YouTube" was used in order to start the search
def data_validation(message):
    query = message.text.split()
    if("youtube" not in query[0].lower()): # Set flag false if regular text
        return False
    else:
        return True

@bot.message_handler(func=data_validation)
# Searching for youtube videos
# using RAPID API
def search(message):
    query = message.text.split()
    # Check if data is valid, and change variable to be lowercase for easy use.
    if(data_validation(message) == True and query[0].lower() == "youtube"):
        try:
            if(data_validation(message) == True and query[1].lower() != "-d"):
                # Removing the word "YouTube" and sending the results to the YouTube search engine.
                for item in query[:]:
                    if(item.lower() == "youtube"):
                        query.remove(item)
                        search_query = ' '.join(query)
                    else:
                        pass # If it's not a term we're looking to convert, ignore it.
                # RAPID API for YouTube
                try:
                    url = "https://youtube-search-results.p.rapidapi.com/youtube-search/"
                    querystring = {"q": search_query}
                    headers = {
                        "X-RapidAPI-Key": RAPID_KEY,
                        "X-RapidAPI-Host": "youtube-search-results.p.rapidapi.com"
                    }
                    response = requests.request("GET", url, headers=headers, params=querystring) # Grabbing response information from URL
                    request = json.loads(response.text) # Parsing json string for python use
                    # Testing to see if the RAPID API service responds and is online.
                    if(response.status_code == 503):
                        # If the service is not online, let the user know.
                        bot.send_message(message.chat.id, f"The RAPID API service appears to be offline, try back later.\n")
                    if(response.status_code == 429):
                        # If the service has reached max quota for the day, let the user know.
                        bot.send_message(message.chat.id, f"Max quota reached, try back in 24 hours.\n")
                    # Grabbing first link from json text and sending direct url and title.
                    first_link = str((request["items"][0]["url"]))
                    bot.send_message(message.chat.id, f"{first_link}\n") # Sending first link that was queried.
                # If there are no results found for the requested video, sending an error message to alert the user.
                except:
                    bot.send_message(message.chat.id, "Unable to load video.\n")
        except:
            pass # Ignoring if not the phrase we're looking for.

def test(message):
    string = message.text.split()
    print(string)
    if(string[0] == "test" and data_validation(message) == True):
        print("This is a test and i should be printed")
        bot.send_message(message.chat.id, "Test message")

# Stay-alive function for bot pinging / communication
bot.infinity_polling(1440)
The first problem in your code is your first line:
from email import message
You import message from email and also pass a parameter with the same name to the data_validation function, which shadows that import. You then return False from data_validation for regular text; when the filter function returns False, the handler will never be executed.
First, give the import an alias:
Try This
from email import message as msg
import os
import re
import html
import json
import telebot
import requests
import http.client
from pytube import *
from dotenv import load_dotenv

load_dotenv()

# Creating, hiding, and using API keys
API_KEY = os.getenv("API_KEY")
RAPID_KEY = os.getenv("RAPID_API")
bot = telebot.TeleBot(API_KEY)

# Creating a help message for guidance on how to use bot.
@bot.message_handler(commands=["start"])
def help(message):
    # Trying to send help message, if unable to send, throw an error message for the user.
    try:
        bot.send_message(message.chat.id, "Use \"Youtube\" and the video name to search for a video.\n")
    except:
        bot.send_message(message.chat.id, "There was an error fetching help, the bot may be offline.\n")

# Checking data and seeing if the word "YouTube" was used in order to start the search
def data_validation(message):
    query = message.text.split()
    print(query)
    if("youtube" not in query[0].lower()): # Set flag false if regular text
        return False # if this returns False, the handler will never be executed
    else:
        return True

# Searching for youtube videos
# using RAPID API
@bot.message_handler(func=data_validation)
def search(message):
    query = message.text.split()
    print(query) # if the function is executed you will see the query result
    # Check if data is valid, and change variable to be lowercase for easy use.
    if(data_validation(message) == True and query[0].lower() == "youtube"):
        try:
            if(data_validation(message) == True and query[1].lower() != "-d"):
                # Removing the word "YouTube" and sending the results to the YouTube search engine.
                for item in query[:]:
                    if(item.lower() == "youtube"):
                        query.remove(item)
                        search_query = ' '.join(query)
                    else:
                        pass # If it's not a term we're looking to convert, ignore it.
                # RAPID API for YouTube
                try:
                    url = "https://youtube-search-results.p.rapidapi.com/youtube-search/"
                    querystring = {"q": search_query}
                    headers = {
                        "X-RapidAPI-Key": RAPID_KEY,
                        "X-RapidAPI-Host": "youtube-search-results.p.rapidapi.com"
                    }
                    response = requests.request("GET", url, headers=headers, params=querystring) # Grabbing response information from URL
                    request = json.loads(response.text) # Parsing json string for python use
                    # Testing to see if the RAPID API service responds and is online.
                    if(response.status_code == 503):
                        # If the service is not online, let the user know.
                        bot.send_message(message.chat.id, f"The RAPID API service appears to be offline, try back later.\n")
                    if(response.status_code == 429):
                        # If the service has reached max quota for the day, let the user know.
                        bot.send_message(message.chat.id, f"Max quota reached, try back in 24 hours.\n")
                    # Grabbing first link from json text and sending direct url and title.
                    first_link = str((request["items"][0]["url"]))
                    bot.send_message(message.chat.id, f"{first_link}\n") # Sending first link that was queried.
                # If there are no results found for the requested video, sending an error message to alert the user.
                except:
                    bot.send_message(message.chat.id, "Unable to load video.\n")
        except:
            pass # Ignoring if not the phrase we're looking for.

def test(message):
    string = message.text.split()
    print(string)
    if(string[0] == "test" and data_validation(message) == True):
        print("This is a test and i should be printed")
        bot.send_message(message.chat.id, "Test message")

# Stay-alive function for bot pinging / communication
bot.infinity_polling(1440)
I found that using if __name__ == '__main__': and keeping all of the handler functions inside a main() function made everything run smoothly.
I'm still trying to figure out why this works.
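A minimal sketch of that layout (hypothetical names, just to illustrate the structure described above):

import os
import telebot
from dotenv import load_dotenv

def data_validation(message):
    # same check as above: only handle messages that start with "youtube"
    return message.text.split()[0].lower() == "youtube"

def main():
    load_dotenv()
    bot = telebot.TeleBot(os.getenv("API_KEY"))

    # handlers are registered inside main(), which only runs when the
    # script is executed directly
    @bot.message_handler(commands=["start"])
    def help_handler(message):
        bot.send_message(message.chat.id, "Use \"Youtube\" and the video name to search for a video.\n")

    @bot.message_handler(func=data_validation)
    def search(message):
        bot.send_message(message.chat.id, "Searching...")  # the search logic from above goes here

    bot.infinity_polling(1440)

if __name__ == '__main__':
    main()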

atlassian-python-api exception when using confluence.delete_attachment()

I am trying to delete all attachments from a page with confluence.delete_attachment(page_id, filename, version), but it always throws the same exception:
requests.exceptions.HTTPError: 401 Client Error: Authorization Required for url:
I am currently deleting the attachments as follows:
import os
import requests
from atlassian import Confluence

confluence_url = 'https://xxx/confluence/'
confluence_usr = os.environ.get('CONFLUENCE_CREDS_USR')
confluence_psw = os.environ.get('CONFLUENCE_CREDS_PSW')
confluence = Confluence(url=confluence_url, username=confluence_usr, password=confluence_psw)

# Check site response, if site not reachable, check 5 times every 5 sec
response = resilient_confluence_http_get_request(f'{confluence_url}rest/api/content/{page_ids[csv_file]}')
attachment = confluence.get_attachments_from_content(page_ids[csv_file])
print(f"attachment type: {type(attachment)}")
print(f"attachment content:\n {attachment}\n\n")

att_to_del = []
indices = 0
for i in attachment['results']:
    print(f"id type: {type(attachment['results'][indices]['id'])}")
    att_to_del.append(attachment['results'][indices]['id'])
    indices += 1

print(f"delete attachments with id's {att_to_del}")
for id_to_delete in att_to_del:
    response = requests.delete(f'https://xxx/confluence/rest/api/content/{id_to_delete}',
                               auth=(confluence_usr, confluence_psw))
The delete function is the only one that doesn't work. What am I doing wrong?
Here is where I try to delete with confluence.delete_attachment(...):
def response_wait_repeat():
    attachments = confluence.get_attachments_from_content('my_site_id')
    print(attachments)
    confluence.delete_attachment('my_site_id', 'my_filename', version=None)
I assume you are using atlassian-python-api.
As in the examples in the PyPI documentation, you first need to create a connection to Confluence:
from atlassian import Confluence

confluence = Confluence(
    url='http://localhost:8090',
    username='admin',
    password='admin')

status = confluence.create_page(
    space='DEMO',
    title='This is the title',
    body='This is the body. You can use <strong>HTML tags</strong>!')

print(status)
If you are still getting this error (401 Authorization Required), I would check manually in Confluence whether you have permission to perform these actions.
That is probably the issue, since you say delete is the only action you cannot perform.
Otherwise, I would contact the Confluence admin.
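If permissions are fine, a minimal sketch for deleting every attachment on a page with delete_attachment (assuming the attachment's title field holds its filename, which is how the Confluence REST API usually reports it):

from atlassian import Confluence

confluence = Confluence(url='https://xxx/confluence/',
                        username=confluence_usr,
                        password=confluence_psw)

page_id = 'my_site_id'  # the page whose attachments should be removed
attachments = confluence.get_attachments_from_content(page_id)
for att in attachments['results']:
    # delete_attachment takes the page id and the attachment filename,
    # as used in the question above
    confluence.delete_attachment(page_id, att['title'], version=None)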

Google Drive API: How to download files from Google Drive?

import json
import requests

access_token = ''
session = requests.Session()

r = session.request('get', 'https://www.googleapis.com/drive/v3/files?access_token=%s' % access_token)
response_text = str(r.content, encoding='utf-8')
files_list = json.loads(response_text).get('files')
files_id_list = []
for item in files_list:
    files_id_list.append(item.get('id'))

for item in files_id_list:
    file_r = session.request('get', 'https://www.googleapis.com/drive/v3/files/%s?alt=media&access_token=%s' % (item, access_token))
    print(file_r.content)
I use the above code and Google shows:
We're sorry ...
... but your computer or network may be sending automated queries. To protect our users, we can't process your request right now.
I don't know whether this method simply can't be used for downloading, or where the problem is.
The reason you are getting this error is that you are requesting the data in a loop, which sends many requests to Google's server in quick succession, hence the error:
We're sorry ... ... but your computer or network may be sending automated queries
Also, the access_token should not be placed in the URL query string; put it in the Authorization header instead. You can try this out on the OAuth Playground site (oauthplayground).
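A minimal sketch of that change (assuming access_token is a valid OAuth 2.0 token with Drive scope):

import requests

access_token = "<your access token>"  # obtained from the OAuth flow
session = requests.Session()
headers = {"Authorization": "Bearer %s" % access_token}

# list the files, sending the token in the Authorization header instead of the URL
resp = session.get("https://www.googleapis.com/drive/v3/files", headers=headers)
for f in resp.json().get("files", []):
    # download each file's content with alt=media
    file_r = session.get(
        "https://www.googleapis.com/drive/v3/files/%s" % f["id"],
        params={"alt": "media"},
        headers=headers,
    )
    print(file_r.content)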

How to download NASA satellite OPeNDAP data using Python

I have tried requests, pydap, urllib, and netcdf4 and keep either getting redirect errors or permission errors when trying to download the following NASA data:
GLDAS_NOAH025SUBP_3H: GLDAS Noah Land Surface Model L4 3 Hourly 0.25 x 0.25 degree Subsetted V001 (http://disc.sci.gsfc.nasa.gov/uui/datasets/GLDAS_NOAH025SUBP_3H_V001/summary?keywords=Hydrology)
I am attempting to download about 50k files; here is an example of one, which works when pasted into the Google Chrome browser (if you have the proper username and password):
http://hydro1.gesdisc.eosdis.nasa.gov/daac-bin/OTF/HTTP_services.cgi?FILENAME=%2Fdata%2FGLDAS_V1%2FGLDAS_NOAH025SUBP_3H%2F2016%2F244%2FGLDAS_NOAH025SUBP_3H.A2016244.2100.001.2016256190725.grb&FORMAT=TmV0Q0RGLw&BBOX=-11.95%2C28.86%2C-0.62%2C40.81&LABEL=GLDAS_NOAH025SUBP_3H.A2016244.2100.001.2016286201048.pss.nc&SHORTNAME=GLDAS_NOAH025SUBP_3H&SERVICE=SUBSET_GRIB&VERSION=1.02&LAYERS=AAAB&DATASET_VERSION=001
Anyone have any experience getting OPeNDAP NASA data from the web using python? I am happy to provide more information if desired.
Here is the requests attempt, which gives a 401 error:
import requests

def httpdownload():
    '''loop through each line in the text file and open url'''
    httpfile = open(pathlist[0]+"NASAdownloadSample.txt", "r")
    for line in httpfile:
        print line
        outname = line[-134:-122]+".hdf"
        print outname
        username = ""
        password = "*"
        r = requests.get(line, auth=("username", "password"), stream=True)
        print r.text
        print r.status_code
        with open(pathlist[0]+outname, 'wb') as out:
            out.write(r.content)
        print outname, "finished" # keep track of progress
And here is the pydap example which gives redirect error:
import install_cas_client
from pydap.client import open_url

def httpdownload():
    '''loop through each line in the text file and open url'''
    username = ""
    password = ""
    httpfile = open(pathlist[0]+"NASAdownloadSample.txt", "r")
    fileone = httpfile.readline()
    filetot = fileone[:7]+username+":"+password+"#"+fileone[7:]
    print filetot
    dataset = open_url(filetot)
I did not find a solution using Python, but given the information I have now it should be possible. I used wget with a .netrc file and a cookie file, as shown below (https://disc.gsfc.nasa.gov/information/howto?title=How%20to%20Download%20Data%20Files%20from%20HTTP%20Service%20with%20wget):
#!/bin/bash
cd  # path to output files
touch .netrc
echo "machine urs.earthdata.nasa.gov login <username> password <password>" >> .netrc
chmod 0600 .netrc
touch .urs_cookies
wget --content-disposition --trust-server-names --load-cookies ~/.urs_cookies \
     --save-cookies ~/.urs_cookies --auth-no-challenge=on --keep-session-cookies \
     -i <path to text file of url list>
Hope it helps anyone else working with NASA data from this server.
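A rough Python equivalent of the wget approach (just a sketch, assuming the same ~/.netrc entry for urs.earthdata.nasa.gov is in place; requests can pick up credentials from .netrc when no auth argument is given, and a Session keeps the URS cookies across the redirect chain):

import requests

def download(url, outname):
    # the Session stores the authentication cookies set by urs.earthdata.nasa.gov
    with requests.Session() as session:
        response = session.get(url)  # credentials are read from ~/.netrc
        response.raise_for_status()
        with open(outname, "wb") as out:
            out.write(response.content)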
I realize it's a bit late to answer this question for the original poster, but I stumbled across this question while trying to do the same thing so I'll leave my solution here. It seems the NASA server uses redirects and Basic Authorization in a way the standard libraries don't expect. When you download from (for example) https://hydro1.gesdisc.eosdis.nasa.gov, you'll get redirected to https://urs.earthdata.nasa.gov for authentication. That server sets an authentication token as a cookie and redirects you back to download the file. If you're not handling cookies properly, you'll be stuck in an infinite redirection loop. If you're not handling authentication and redirection properly, you'll get an access denied error.
To get around this problem, chain HTTPRedirectHandler, HTTPCookieProcessor, and HTTPPasswordMgrWithDefaultRealm together and set it as the default opener or just use that opener directly.
from urllib import request

username = "<your username>"
password = "<your password>"
url = "<remote url of file>"
filename = "<local destination of file>"

redirectHandler = request.HTTPRedirectHandler()
cookieProcessor = request.HTTPCookieProcessor()
passwordManager = request.HTTPPasswordMgrWithDefaultRealm()
passwordManager.add_password(None, "https://urs.earthdata.nasa.gov", username, password)
authHandler = request.HTTPBasicAuthHandler(passwordManager)

opener = request.build_opener(redirectHandler, cookieProcessor, authHandler)
request.install_opener(opener)
request.urlretrieve(url, filename)

Facebook Posting to a Page

I am trying to post to the wall of a Facebook page that I administer (not a profile), but with no luck so far. How do I achieve this? I'm stuck at the page access token retrieval part.
#!/usr/bin/python
# coding: utf-8

import facebook
import urllib
import urlparse
import subprocess
import warnings

# Hide deprecation warnings. The facebook module isn't that up-to-date (facebook.GraphAPIError).
warnings.filterwarnings('ignore', category=DeprecationWarning)

# Parameters of your app and the id of the profile you want to mess with.
FACEBOOK_APP_ID = 'XXXXXXXXXXXXXX'
FACEBOOK_APP_SECRET = 'XXXXXXXXXXXXXXXXXXXXX'
FACEBOOK_PROFILE_ID = 'XXXXXXXXXXX'

# Trying to get an access token. Very awkward.
oauth_args = dict(client_id=FACEBOOK_APP_ID,
                  client_secret=FACEBOOK_APP_SECRET,
                  scope='manage_pages',
                  response_type='token')
oauth_curl_cmd = ['curl',
                  'https://graph.facebook.com/oauth/access_token?' + urllib.urlencode(oauth_args)]
oauth_response = subprocess.Popen(oauth_curl_cmd,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE).communicate()[0]
print urllib.urlencode(oauth_args)

try:
    oauth_access_token = urlparse.parse_qs(str(oauth_response))['access_token'][0]
except KeyError:
    print('Unable to grab an access token!')
    exit()

print oauth_access_token

facebook_graph = facebook.GraphAPI(oauth_access_token)

# Try to post something on the wall.
try:
    fb_response = facebook_graph.put_wall_post('Hello from Python', \
                                               profile_id=FACEBOOK_PROFILE_ID)
    print fb_response
except facebook.GraphAPIError as e:
    print 'Something went wrong:', e.type, e.message
I would not recommend doing this through the command line with curl, as it is less secure and less reliable. You can do all of this with the urllib2 and json modules.
To get the access token, you just need to make a call to https://graph.facebook.com/oauth/access_token?client_id=YOUR_APP_ID&client_secret=YOUR_APP_SECRET&grant_type=client_credentials
So you would do:
import urllib2

url = 'https://graph.facebook.com/oauth/access_token?client_id=YOUR_APP_ID&client_secret=YOUR_APP_SECRET&grant_type=client_credentials'
target = urllib2.urlopen(url)
token = target.read()[13:]  # strip the leading 'access_token=' identifier
EDIT:
My bad, I forgot that the Facebook OAuth endpoint gives you the access token in plain text, so you don't need the json module. I've updated the example to show what you should be doing. Note that target.read() will give you the string 'access_token=ACCESS_TOKEN', and the slice just removes that identifier.
To see what the response looks like, go to the URL with your own information filled in and you will see the response containing the access_token.
The second half of this page should have all the information you need.
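For completeness, a minimal sketch of using that token with the facebook module from the question (whether the post succeeds still depends on the token having the right page permissions):

import facebook

graph = facebook.GraphAPI(token)  # token parsed from the response above
# post to the page's wall, as in the original script; FACEBOOK_PROFILE_ID as defined in the question
graph.put_wall_post('Hello from Python', profile_id=FACEBOOK_PROFILE_ID)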
