Python Upload Files to SharePoint using Shareplum

I am using this much-shared code to try to upload a file to SharePoint, into the Shared Documents folder, using Shareplum.
import requests
from shareplum import Office365

# Set login info
username = 'my.email@address.com'
password = 'myverifiedapppassword'
site_name = 'mysite'
base_path = 'https://xxxxxxxx.sharepoint.com'
doc_library = 'Shared%20Documents'
file_name = "hellotest.txt"  # file is in the same directory as the script

# Obtain auth cookie
authcookie = Office365(base_path, username=username, password=password).GetCookies()
session = requests.Session()
session.cookies = authcookie
session.headers.update({'user-agent': 'python_bite/v1'})
session.headers.update({'accept': 'application/json;odata=verbose'})
session.headers.update({'X-RequestDigest': 'FormDigestValue'})
response = session.post(
    url=base_path + "/sites/" + site_name
        + "/_api/web/GetFolderByServerRelativeUrl('" + doc_library
        + "')/Files/add(url='a.txt',overwrite=true)",
    data="")
session.headers.update({'X-RequestDigest': response.headers['X-RequestDigest']})

# Upload file
with open(file_name, 'rb') as file_input:
    try:
        response = session.post(
            url=base_path + "/sites/" + site_name
                + "/_api/web/GetFolderByServerRelativeUrl('" + doc_library
                + "')/Files/add(url='" + file_name + "',overwrite=true)",
            data=file_input)
        print("response: ", response.status_code)  # it returns 200
        if response.status_code == 200:
            print("File uploaded successfully")
    except Exception as err:
        print("Something went wrong: " + str(err))

print('File Uploaded Successfully')
The problem occurs when running the code: I always get a traceback and a KeyError, as follows:
Traceback (most recent call last):
  File "S:\upload.py", line 22, in <module>
    session.headers.update({'X-RequestDigest': response.headers['X-RequestDigest']})
  File "C:\Python39\lib\site-packages\requests\structures.py", line 54, in __getitem__
    return self._store[key.lower()][1]
KeyError: 'x-requestdigest'
Something to do with X-RequestDigest isn't working properly on line 22, but I cannot figure out what.
Any tips would be greatly appreciated!
Thanks

I have tried the below code and it is working.
from shareplum import Office365
from shareplum import Site
from shareplum.site import Version

# Login info
server_url = "https://example.sharepoint.com/"
site_url = server_url + "sites/my_site_name"
Username = 'myusername'
Password = 'mypassword'
Sharepoint_folder = 'Shared Documents'
fileName = 'myfilename'

def file_upload_to_sharepoint(**context):
    authcookie = Office365(server_url, username=Username, password=Password).GetCookies()
    site = Site(site_url, version=Version.v365, authcookie=authcookie)
    folder = site.Folder(Sharepoint_folder)
    with open(fileName, mode='rb') as file:
        fileContent = file.read()
    folder.upload_file(fileContent, "filename.bin")

file_upload_to_sharepoint()
Let me know if this works for you as well.
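For what it's worth, the KeyError in the original code happens because the first POST is sent with a placeholder digest ('FormDigestValue'), so SharePoint rejects the call, and SharePoint does not return the digest as a response header in any case: it returns it in the body of a contextinfo call. If you want to keep the raw REST approach instead of Shareplum's Folder API, here is a minimal, untested sketch that reuses the session and variables from the question:

# Fetch a form digest from the contextinfo endpoint (untested sketch).
digest_url = base_path + "/sites/" + site_name + "/_api/contextinfo"
digest_response = session.post(digest_url, data="")
form_digest = digest_response.json()['d']['GetContextWebInformation']['FormDigestValue']
session.headers.update({'X-RequestDigest': form_digest})
# Subsequent Files/add POSTs can then reuse this header until the digest expires.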

Related

upload image from pixabay api to wordpress using rest api and python

I am new to Python and want to know how to upload an image from the Pixabay API (or another source) to WordPress using the REST API and Python.
When I use this:
url_image = "https://pixabay.com/api/?key={API_KEY}&q={keyword}.jpg"
I get this message:
{"code":"rest_upload_unknown_error","message":"Sorry, you are not allowed to upload this file type.","data":{"status":500}}
import base64, requests
from tempfile import NamedTemporaryFile

# keyword = input('Enter Your name')
keyword = 'flower'

def header(user, password):
    credentials = user + ':' + password
    token = base64.b64encode(credentials.encode())
    header_json = {'Authorization': 'Basic ' + token.decode('utf-8'),
                   'Content-Disposition': 'attachment; filename=%s' % "test1.jpg"}
    return header_json

def upload_image_to_wordpress(file_path, header_json):
    media = {'file': file_path, 'caption': f'{keyword}'}
    responce = requests.post("https://yourwebsite.com/wp-json/wp/v2/media", headers=header_json, files=media)
    print(responce.text)

heder = header("username", "password")  # username, application password
url_image = "https://pixabay.com/api/?key={API_KEY}&q={keyword}.jpg"
# url = "https://cdn.pixabay.com/photo/2021/11/30/08/24/strawberries-6834750_1280.jpg"
raw = requests.get(f'{url_image}').content
with NamedTemporaryFile(delete=False, mode="wb", suffix=".jpg") as img:
    img.write(raw)
c = open(img.name, "rb")
upload_image_to_wordpress(c, heder)
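The 500 error is most likely because the bytes being uploaded are the Pixabay API's JSON response, not an actual JPEG: the /api/ endpoint returns JSON, and the real image URL has to be read out of its hits list. A hedged sketch of that flow, assuming API_KEY holds your key (the image_type parameter and largeImageURL field come from Pixabay's documented JSON format; the WordPress URL is the question's placeholder):

import requests

# Query the Pixabay API for JSON results, then download the actual image bytes.
api_response = requests.get("https://pixabay.com/api/",
                            params={"key": API_KEY, "q": keyword, "image_type": "photo"})
hits = api_response.json().get("hits", [])
if hits:
    image_bytes = requests.get(hits[0]["largeImageURL"]).content
    wp_headers = header("username", "password")  # auth headers from header() above
    wp_headers["Content-Type"] = "image/jpeg"    # tell WordPress the payload is a JPEG
    r = requests.post("https://yourwebsite.com/wp-json/wp/v2/media",
                      headers=wp_headers, data=image_bytes)
    print(r.status_code, r.text)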

Get path/url of some files from sharepoint (python)

I need to create a program to get the path/URL of some files (I have an Excel file with the names of these files). I found a solution on the internet but it doesn't work.
print("connecting")
baseurl = 'https://mc2.sharepoint.com/'
basesite = '/sites/SES'
siteurl = baseurl + basesite
ctx_auth = AuthenticationContext(siteurl)
ctx_auth.acquire_token_for_user("login", "password")
ctx = ClientContext(siteurl, ctx_auth)
print("connected")
wb = load_workbook('path.xlsx')
ws = wb.active
for i in range(2, ws.max_row+1):
qry_txt = str(ws.cell(i, 1).value) + '.pdf'
print(qry_txt)
doc_lib = ctx.web.lists.get_by_title(qry_txt)
items = doc_lib.items.select(["FileSystemObjectType"]).expand(["File", "Folder"]).get().execute_query()
for item in items:
if item.file_system_object_type == FileSystemObjectType.Folder:
print("Folder url: {0}".format(item.folder.serverRelativeUrl))
else:
print("File url: {0}".format(item.file.serverRelativeUrl))
ClientRequestException: (None, None, "404 Client Error: Not Found for url: https://mc2.sharepoint.com//sites/SES/_api/Web/lists/GetByTitle('qry_txt')/items?$select=FileSystemObjectType&$expand=File,Folder")
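One likely cause: get_by_title expects the title of a list or document library, not a file name, which is why the request 404s. A hedged sketch that enumerates the library once and then looks the Excel names up in it, reusing ctx and ws from above (the library title "Documents" is an assumption; substitute your own):

# Enumerate the document library once, mapping file names to server-relative URLs.
doc_lib = ctx.web.lists.get_by_title("Documents")  # library title is an assumption
items = doc_lib.items.select(["FileSystemObjectType"]).expand(["File"]).get().execute_query()
urls_by_name = {}
for item in items:
    if item.file_system_object_type != FileSystemObjectType.Folder:
        urls_by_name[item.file.name] = item.file.serverRelativeUrl

# Then look up each name from the Excel sheet:
for i in range(2, ws.max_row + 1):
    qry_txt = str(ws.cell(i, 1).value) + '.pdf'
    print(qry_txt, '->', urls_by_name.get(qry_txt, 'not found'))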

Upload file in SharePoint using python

I'm trying to upload a file using a Python script. When I run the code it gives me no error, but it does not upload the file to my SharePoint folder.
import requests
from shareplum import Office365
from config import config

# get data from configuration
username = config['sp_user']
password = config['sp_password']
site_name = config['sp_site_name']
base_path = config['sp_base_path']
doc_library = config['sp_doc_library']
file_name = "cat_pic.jpg"

# Obtain auth cookie
authcookie = Office365(base_path, username=username, password=password).GetCookies()
session = requests.Session()
session.cookies = authcookie
session.headers.update({'user-agent': 'python_bite/v1'})
session.headers.update({'accept': 'application/json;odata=verbose'})

# perform the actual upload
with open(file_name, 'rb') as file_input:
    try:
        response = session.post(
            url=base_path + "/sites/" + site_name
                + "/Shared%20Documents/Forms/AllItems.aspx/_api/web/GetFolderByServerRelativeUrl('" + doc_library
                + "')/Files/add(url='" + file_name + "',overwrite=true)",
            data=file_input)
    except Exception as err:
        print("Some error occurred: " + str(err))
config.py:

config = dict()
config['sp_user'] = 'email'
config['sp_password'] = 'pass'
config['sp_base_path'] = 'https://bboxxeng.sharepoint.com'
config['sp_site_name'] = 'TESTIAN'
config['sp_doc_library'] = 'Test'
This is the URL of my SharePoint site: https://bboxxeng.sharepoint.com/sites/TESTIAN/Shared%20Documents/Forms/AllItems.aspx and I have already created a folder in it named Test.
Thank you for answering my question.

Modify the code as below.
response = session.post(
    url=base_path + "/sites/" + site_name
        + "/_api/web/GetFolderByServerRelativeUrl('Shared%20Documents/" + doc_library
        + "')/Files/add(url='" + file_name + "',overwrite=true)",
    data=file_input)
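If it still appears to do nothing, it may help to inspect the REST response instead of discarding it; with the accept: application/json;odata=verbose header already set on the session, a successful Files/add call returns the new file's metadata. A small, hedged check:

print(response.status_code)  # 200 is expected on success
result = response.json()
# On success the verbose OData payload describes the uploaded file:
print(result['d']['ServerRelativeUrl'])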

Issue downloading csv file from website in Python

So I am trying to download and write a CSV file onto my computer from a site that requires my email address and password for authentication. I have the following code:
import cStringIO
import pycurl
import urllib

url = 'http://www.riglocator.ca/report=rig%2Frig%2D150226%2Ecsv'

def GetPage(url, proxy=None):
    if proxy:
        port = 8888
        proxy = proxy.replace("socks://", "")
        if ":" in proxy:
            port = int(proxy.rsplit(":", 1)[1])
            proxy = proxy.rsplit(":", 1)[0]
    try:
        buf = cStringIO.StringIO()
        c = pycurl.Curl()
        c.setopt(c.URL, url)
        c.setopt(c.WRITEFUNCTION, buf.write)
        c.setopt(c.CONNECTTIMEOUT, 5)
        c.setopt(c.TIMEOUT, 8)
        if proxy:
            c.setopt(pycurl.PROXY, proxy)
            c.setopt(pycurl.PROXYPORT, port)
            c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
        c.setopt(pycurl.USERPWD, 'john@mail.com:password123')
        c.setopt(c.FOLLOWLOCATION, True)
        c.perform()
        c.close()
        results = buf.getvalue()
        buf.close()
    except:
        results = ""
    return results

GetPage(url, "socks://127.0.0.1:8888")

def loader():
    csv_url = GetPage(url, "socks://127.0.0.1:8888")
    r = urllib.urlopen(csv_url)
    print(r)
    csv = r.read()
    csv_str = str(csv)
    lines = csv_str.split('\\n')
    dest_url = r'mapfile.csv'
    fx = open(dest_url, 'w')
    for line in lines:
        fx.write(line + '\n')
    fx.close()

loader()
But this still returns the HTML of the login page. Any suggestions?
I am getting this error:
File "C:/Users/cevans/PycharmProjects/RigLocatorMapPull/rigmapscrape.py", line 55, in <module>
loader()
File "C:/Users/cevans/PycharmProjects/RigLocatorMapPull/rigmapscrape.py", line 44, in loader
r = urllib.urlopen(csv_url)
File "C:\Python27\lib\urllib.py", line 87, in urlopen
return opener.open(url)
File "C:\Python27\lib\urllib.py", line 208, in open
return getattr(self, name)(url)
File "C:\Python27\lib\urllib.py", line 463, in open_file
return self.open_local_file(url)
File "C:\Python27\lib\urllib.py", line 477, in open_local_file
raise IOError(e.errno, e.strerror, e.filename)
IOError: [Errno 2] The system cannot find the path specified: ''
Process finished with exit code 1
Here is a link to some code I wrote to grab a file with pycurl; it should do basically what you need. You just need to add the option c.setopt(pycurl.USERPWD, 'username:userpass') to my code to set your username and password.
http://prestongarrison.com/proper-python-pycurl-example/
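For completeness, here is a minimal sketch of that approach with the USERPWD option added, using the question's URL and placeholder credentials. Note that this only works if the site accepts HTTP basic auth; if it authenticates through an HTML login form (as the returned login page suggests), a form-based approach like the Mechanize solution below is needed:

import cStringIO
import pycurl

buf = cStringIO.StringIO()
c = pycurl.Curl()
c.setopt(c.URL, 'http://www.riglocator.ca/report=rig%2Frig%2D150226%2Ecsv')
c.setopt(c.WRITEFUNCTION, buf.write)                   # collect the response body in memory
c.setopt(pycurl.USERPWD, 'john@mail.com:password123')  # HTTP basic auth credentials
c.setopt(c.FOLLOWLOCATION, True)                       # follow any redirects
c.perform()
c.close()

# Write the downloaded bytes to a local CSV file
with open('mapfile.csv', 'w') as fx:
    fx.write(buf.getvalue())
buf.close()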
# This is a solution using the Mechanize browser library which takes the url,
# changes it to the current date, submits the username/password in a form,
# downloads a csv and writes it to a folder location:
__author__ = 'cevans'

import mechanize
import os
import cookielib
import datetime, string

USERNAME = 'xxxx'
PASSWORD = 'xxxxx'
OLDURL = 'http://www.oldurl.com/report050301'
folder = r'\\Driver'

def loader():
    # Takes current date and changes URL to grab the correct date file
    # (schedule only runs on one day of the week)
    cdate = str(datetime.date.today().strftime("%y%m%d"))
    DATAURL = string.replace(OLDURL, '150301', cdate)
    # Browser and cookie jar
    br = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    # Browser options
    br.set_handle_equiv(True)
    br.set_handle_gzip(False)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(True)
    # Follows refresh 0 but does not hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    # Opens site:
    r = br.open(DATAURL)
    html = r.read()
    br.select_form(nr=0)
    br.form['nauthemail'] = USERNAME
    br.form['password'] = PASSWORD
    br.submit()
    r = br.open(DATAURL)
    # Read and write file to csv, in folder
    csv = r.read()
    csv_str = str(csv)
    lines = csv_str.split('\\n')
    fname = 'map-' + cdate
    base_filename = fname
    filename_suffix = '.csv'
    folder1 = os.path.join(folder, base_filename + filename_suffix)
    dest_url = folder1
    fx = open(dest_url, 'w')
    for line in lines:
        fx.write(line + '\n')
    fx.close()

loader()

Getting 403 error when trying to parse dropbox events page with python and mechanize

I use this script to get a list of all file updates to a certain directory. I then parse that list to get the time slots I have been active in that directory. That way I can quickly see how much time I have spent on the project and know what to charge my client.
I have written a small Python script, adapted from this: https://github.com/jncraton/PythonDropboxUploader
I added the bottom function to retrieve a specific events page from https://www.dropbox.com/events?ns=false&n=50
I used the script two months ago and it worked well, but now I am getting 403 (Forbidden) errors on:
eventSrc = self.browser.open(req).read()
Probably Dropbox is trying to block scrapers like mine to push programmers to use their API instead, but unfortunately the API doesn't support listing the events.
Can anybody help me get it working again?
This is the Python code that creates the connection:
import mechanize
import urllib
import urllib2
import re
import json

class DropboxConnection:
    """ Creates a connection to Dropbox """
    email = ""
    password = ""
    root_ns = ""
    token = ""
    browser = None

    def __init__(self, email, password):
        self.email = email
        self.password = password
        self.login()
        self.get_constants()

    def login(self):
        """ Login to Dropbox and return mechanize browser instance """
        # Fire up a browser using mechanize
        self.browser = mechanize.Browser()
        self.browser.set_handle_equiv(False)
        self.browser.set_handle_redirect(True)
        self.browser.set_handle_referer(True)
        self.browser.set_handle_robots(False)
        self.browser.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:14.0) Gecko/20120722 Firefox/14.0.1')]
        # Browse to the login page
        self.browser.open('https://www.dropbox.com/login')
        # Enter the username and password into the login form
        isLoginForm = lambda l: l.action == "https://www.dropbox.com/login" and l.method == "POST"
        try:
            self.browser.select_form(predicate=isLoginForm)
        except:
            self.browser = None
            raise(Exception('Unable to find login form'))
        self.browser['login_email'] = self.email
        self.browser['login_password'] = self.password
        self.browser['t'] = "1230"
        # Send the form
        response = self.browser.submit()

    def get_constants(self):
        """ Load constants from page """
        home_src = self.browser.open('https://www.dropbox.com/home').read()
        try:
            self.root_ns = re.findall(r"root_ns: (\d+)", home_src)[0]
            self.token = re.findall(r"TOKEN: '(.+)'", home_src)[0]
        except:
            raise(Exception("Unable to find constants for AJAX requests"))

    def upload_file(self, local_file, remote_dir, remote_file):
        """ Upload a local file to Dropbox """
        if(not self.is_logged_in()):
            raise(Exception("Can't upload when not logged in"))
        self.browser.open('https://www.dropbox.com/')
        # Add our file upload to the upload form
        isUploadForm = lambda u: u.action == "https://dl-web.dropbox.com/upload" and u.method == "POST"
        try:
            self.browser.select_form(predicate=isUploadForm)
        except:
            raise(Exception('Unable to find upload form'))
        self.browser.form.find_control("dest").readonly = False
        self.browser.form.set_value(remote_dir, "dest")
        self.browser.form.add_file(open(local_file, "rb"), "", remote_file)
        # Submit the form with the file
        self.browser.submit()

    def get_dir_list(self, remote_dir):
        """ Get file info for a directory """
        if(not self.is_logged_in()):
            raise(Exception("Can't download when not logged in"))
        req_vars = "ns_id=" + self.root_ns + "&referrer=&t=" + self.token
        req = urllib2.Request('https://www.dropbox.com/browse' + remote_dir, data=req_vars)
        req.add_header('Referer', 'https://www.dropbox.com/home' + remote_dir)
        dir_info = json.loads(self.browser.open(req).read())
        dir_list = {}
        for item in dir_info['file_info']:
            # Eliminate directories
            if(item[0] == False):
                # get local filename
                absolute_filename = item[3]
                local_filename = re.findall(r".*\/(.*)", absolute_filename)[0]
                # get file URL and add it to the dictionary
                file_url = item[8]
                dir_list[local_filename] = file_url
        return dir_list

    def get_download_url(self, remote_dir, remote_file):
        """ Get the URL to download a file """
        return self.get_dir_list(remote_dir)[remote_file]

    def download_file(self, remote_dir, remote_file, local_file):
        """ Download a file and save it locally """
        fh = open(local_file, "wb")
        fh.write(self.browser.open(self.get_download_url(remote_dir, remote_file)).read())
        fh.close()

    def is_logged_in(self):
        """ Checks if a login has been established """
        if(self.browser):
            return True
        else:
            return False

    def getEventsPage(self, n):
        if(not self.is_logged_in()):
            raise(Exception("Can't get event page when not logged in"))
        url = 'https://www.dropbox.com/next_events'
        values = {'cur_page': n, 'ns_id': 'false'}
        data = urllib.urlencode(values)
        req = mechanize.Request(url, data)
        # print url + '?' + data
        eventSrc = self.browser.open(req).read()
        return eventSrc
And this is the loop that parses the events pages:
from dbupload import DropboxConnection
from getpass import getpass
from bs4 import BeautifulSoup
import re
import parsedatetime.parsedatetime as pdt
import parsedatetime.parsedatetime_consts as pdc

c = pdc.Constants()
p = pdt.Calendar(c)
email = "myemail@gmail.com"  # raw_input("Enter Dropbox email address:")
password = getpass("Enter Dropbox password:")
dateFile = open('all_file_updates.txt', "wb")

try:
    # Create the connection
    conn = DropboxConnection(email, password)
except:
    print("Connection failed")
else:
    print("Connection successful")

n = 250
found = 0
while(n >= 0):
    eventsPageSrc = conn.getEventsPage(n)
    soup = BeautifulSoup(eventsPageSrc)
    table = soup.find("table", {"id": "events"})
    for row in table.findAll('tr'):
        link = row.find("a", href=re.compile('^https://dl-web.dropbox.com/get/ProjectName'))
        if(link != None):
            dateString = row.find("td", attrs={'class': 'modified'}).string
            date = p.parse(dateString)
            dateFile.write('Date: ' + str(date) + ' file: ' + link.string + '\n')
            found = found + 1
    n = n - 1
    print 'page: ' + str(n) + ' Total found: ' + str(found)
In def get_constants(self), change

self.token = re.findall(r"TOKEN: '(.+)'", home_src)[0]

to

self.token = re.findall(r'TOKEN: "(.+)"', home_src)[0]

Dropbox has changed the way it stores constants. Hope it helps.
