Twitter: Correctly uploading a new profile picture - python

The code below is what I use to upload a profile picture, but I get the following error:
import os
import httpx

def get_image_bytes():
    file_size = os.path.getsize('./image/example.jpg')
    return file_size

def get_image_raw():
    with open("./image/example.jpg", "rb") as img_file:
        enc_image = img_file.read()
    return enc_image

def get_media_id():
    file_bytes = get_image_bytes()
    resp = httpx.post(f'https://upload.twitter.com/i/media/upload.json?command=INIT&total_bytes={file_bytes}&media_type=image/jpeg', headers=general_headers)
    return resp.json()['media_id_string']

def append_image():
    media_id = get_media_id()
    resp = httpx.post(f'https://upload.twitter.com/i/media/upload.json?command=APPEND&media_id={media_id}&segment_index=0&media={get_image_raw()}', headers=webkit_headers)
    return media_id

def update_profile():
    media_id = append_image()
    resp = httpx.post(f'https://upload.twitter.com/i/media/upload.json?command=FINALIZE&media_id={media_id}', headers=general_headers)
    print(resp.json())

update_profile()
Error:
{'request': '/i/media/upload.json', 'error': 'Segments do not add up to provided total file size.'}
I don't know how Twitter wants to receive the image binary.
I've heard that they expect chunks, but I've tried almost everything.
When uploading a new profile picture I can see that Twitter crops the image; maybe that is related, but so far I've had no luck.
My image is under 5 MB, by the way.
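For what it's worth: in the APPEND call above, the bytes returned by get_image_raw() are stringified into the URL's query string, so the upload endpoint never receives the actual segment data, which would explain the "Segments do not add up to provided total file size." error. Twitter's chunked media upload expects the APPEND segment as a multipart/form-data body (a media field). A minimal sketch of that step, reusing the question's helpers and headers:

def append_image():
    media_id = get_media_id()
    url = 'https://upload.twitter.com/i/media/upload.json'
    params = {'command': 'APPEND', 'media_id': media_id, 'segment_index': 0}
    files = {'media': get_image_raw()}  # httpx encodes this as multipart/form-data
    resp = httpx.post(url, params=params, files=files, headers=general_headers)
    resp.raise_for_status()  # APPEND should return an empty 2xx body on success
    return media_id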

Related

Bypass Google automated query security check

I have a list of Google Drive file links, about 300 PDF files, which I have to download.
What I am trying to do is request the files from Google's server using Python's requests library.
After 30 to 36 downloads, Google blocks my requests and returns:
We're sorry... but your computer or network may be sending automated queries. To protect our users, we can't process your request right now.
I am using the following code:
import requests

def download_file_from_google_drive(id, destination):
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    if response.status_code != 200:
        print(response.status_code)
        return response.status_code
    print('downloading ' + destination)
    token = get_confirm_token(response)
    if token:
        params = {'id': id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    save_response_content(response, destination)

def get_confirm_token(response):
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value
    return None

def save_response_content(response, destination):
    CHUNK_SIZE = 32768
    with open(destination, "wb") as f:
        i = 0
        for chunk in response.iter_content(CHUNK_SIZE):
            print(str(i) + '%')
            i = i + 1
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    print('downloaded ' + destination)

if __name__ == "__main__":
    file_id = 'file id'
    destination = file_id + '.pdf'
    download_file_from_google_drive(file_id, destination)
I am iterating over my list, calling download_file_from_google_drive for each file.
So, can I bypass the security check?
I tried using a VPN, which changes my IP address, but nothing works.
After about an hour the block lifts and downloading can restart.
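There is no supported way to bypass the check, but pacing the requests and waiting out the block is a common mitigation. A sketch under that assumption, reusing the question's function (the delay and cooldown values are guesses to tune, and rely on download_file_from_google_drive returning the status code on failure as above):

import time

def download_all(file_ids, delay=10, cooldown=3600):
    for file_id in file_ids:
        status = download_file_from_google_drive(file_id, file_id + '.pdf')
        if status is not None and status != 200:
            time.sleep(cooldown)  # wait out the block, then retry once
            download_file_from_google_drive(file_id, file_id + '.pdf')
        time.sleep(delay)  # space out requests to stay under the rate limit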

How to download mail attachment through script?

I am writing a Python script to fetch mail attachments through the Graph API.
In Graph Explorer, I can download file attachments perfectly by manually pressing the download button after calling:
https://graph.microsoft.com/v1.0/me/messages/{message-id}/attachments/{attachment-id}/$value
However, when I make the same request in my Python script, all I get back is 'Response [200]' (so the request works, but the file is not reachable).
I try to make the request like this:
def get_mails_json():
    requestHeaders = {'Authorization': 'Bearer ' + result["access_token"], 'Content-Type': 'application/json'}
    queryResults = msgraph_request(graphURI + "/v1.0/me/messages?$filter=isRead ne true", requestHeaders)
    return json.dumps(queryResults)

try:
    data = json.loads(mails)
    values = data['value']
    for i in values:
        mail_id = i['id']
        mail_subj = i['subject']
        if i['hasAttachments'] != False:
            attachments = o365.get_attachments(mail_id)
            attachments = json.loads(attachments)
            attachments = attachments['value']
            for i in attachments:
                details = o365.get_attachment_details(mail_id, i["id"])
except Exception as e:
    print(e)

def get_attachment_details(mail, attachment):
    requestHeaders = {'Authorization': 'Bearer ' + result["access_token"], 'Content-Type': 'application/json'}
    queryResults = msgraph_request(graphURI + "/v1.0/me/messages/" + mail + "/attachments/" + attachment + '/$value', requestHeaders)
    return json.dumps(queryResults)
Is there any way for me to download the file at all through my Python script?
I found a simple solution for downloading the file through my Python script!
I used chip's answer, found on this thread:
thread containing chip's answer
I make the request for the attachment like so:
def get_attachment_details(mail, attachment):
    requestHeaders = {'Authorization': 'Bearer ' + result["access_token"], 'Content-Type': 'application/file'}
    resource = graphURI + "/v1.0/me/messages/" + mail + "/attachments/" + attachment + '/$value'
    payload = {}
    results = requests.request("GET", resource, headers=requestHeaders, data=payload, allow_redirects=False)
    return results.content
This gets me the raw bytes of the file, which I then write to a file like so:
for i in attachments:
    details = o365.get_attachment_details(mail_id, i["id"])
    toread = io.BytesIO()
    toread.write(details)
    with open(i['name'], 'wb') as f:
        f.write(toread.getbuffer())
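The BytesIO buffer is not strictly necessary here: since results.content is already a bytes object, it can be written to disk directly. A small simplification of the same loop:

for i in attachments:
    details = o365.get_attachment_details(mail_id, i["id"])
    with open(i['name'], 'wb') as f:
        f.write(details)  # details is already bytes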

YouTube categories in Python

I am new to categories in the YouTube API in Python, and I'm running into issues with the language of the videos after I select the region to find videos in.
The problem is that when I enter a region code, the script comments on videos that aren't in English even though they should be.
E.g., I enter the region code 'US' and it still comments on videos in a different language (screenshot of the result attached).
I have tried changing region_code="US" in the script, but then I get an error about 'US' not being defined.
Does anyone know how I can get around this problem or what I'm doing wrong? Thanks
API_KEY = "key"

# This function loads the comments from the Comments.txt file
def load_comments(file):
    comments = []
    f = open(file, 'r', encoding='utf8')
    for comment in f:
        comments.append(comment)
    return comments

def search_video(keyword, region_code, comments):
    # Functions from the library
    from apiclient.discovery import build
    import datetime
    import time

    def get_category_id(youtube, cat):
        req = youtube.videoCategories().list(part='snippet', regionCode=region_code)
        response = req.execute()
        items_list = response["items"]
        for item in items_list:
            video_category_id = item.get("id")
            if video_category_id is not None:
                return video_category_id

    def search(youtube, video_category_id=None):
        if video_category_id is None:
            video_category_id = get_category_id(youtube, keyword)
        req = youtube.search().list(videoCategoryId=video_category_id, order='date', maxResults=len(comments),  # q=keyword,
                                    publishedAfter=(datetime.datetime.utcnow() +
                                                    datetime.timedelta(minutes=-1)).isoformat('T') + 'Z',
                                    part='snippet', type='video')
        return req, video_category_id

    api_key = API_KEY
    youtube = build('youtube', 'v3', developerKey=api_key)
    req, video_category_id = search(youtube)
    res = req.execute()
    while len(res['items']) == 0:
        time.sleep(10)
        req, _ = search(youtube, video_category_id)  # re-use category id if already found to prevent lag
        res = req.execute()
    videoid = [res['items'][i]['id']['videoId'] for i in range(len(res['items']))]
    return videoid
(The script asks which region to use, by the way.)
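If the goal is English-only results, one option worth trying is the search endpoint's relevanceLanguage parameter, which biases (but does not strictly filter) results toward a language; regionCode can be passed on the same request. A sketch of the question's search call with those parameters added:

req = youtube.search().list(videoCategoryId=video_category_id,
                            order='date',
                            maxResults=len(comments),
                            relevanceLanguage='en',  # bias results toward English
                            regionCode=region_code,
                            part='snippet',
                            type='video')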

Sending directory to client from server flask

I am trying to return a directory of images from the server to the client. I'm having a hard time understanding how to format my response so that the client is able to distinguish between the files.
What is the best-practice way to accomplish this? My photos are kept in a tmp folder within the application. Here is my utils.py:
def get_multipart_fields():
    final = {}
    for file in os.listdir(os.path.dirname(os.path.realpath(__file__)) + '/tmp'):
        try:
            with open(os.path.dirname(os.path.abspath(__file__)) + '/tmp/' + os.path.join(file), "rb") as image_file:
                encoded_string = base64.b64encode(image_file.read())
                final[os.path.join(file)] = {'filename': encoded_string}
        except Exception as e:
            print e
    return final.to_string()
and my views.py:
@app.route('/download-files', methods=['GET'])
def gallery():
    client = get_client()
    resource = get_resource()
    download_dir(client, resource, 'test-bucket')
    file_names = get_multipart_fields()
    m = MultipartEncoder(
        fields=file_names
    )
    return Response(json.dumps(m), mimetype=m.content_type)
Where am I going wrong with this logic? Any help would be greatly appreciated.
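For comparison, a simpler shape that avoids multipart entirely is to return one JSON object mapping each filename to its base64-encoded contents; the client can then iterate over the keys. A minimal sketch, assuming the same tmp layout and skipping the question's download_dir/S3 setup:

import base64
import os
from flask import Flask, jsonify

app = Flask(__name__)
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')

@app.route('/download-files', methods=['GET'])
def gallery():
    files = {}
    for name in os.listdir(TMP_DIR):
        with open(os.path.join(TMP_DIR, name), 'rb') as image_file:
            # decode() so the base64 payload is a JSON-serializable string
            files[name] = base64.b64encode(image_file.read()).decode('ascii')
    return jsonify(files)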

Python save images into the folder

I am using the python-instagram API, and I am displaying some images found by searching for a tag.
Rather than displaying them, I want to save those images so they can be used for further analysis.
Is that possible? I am new to Python and to using APIs.
Here is my code snippet which does this:
@route('/tag_search')
def tag_search(session):
    access_token = session.get('access_token')
    content = "<h2>Tag Search</h2>"
    if not access_token:
        return 'Missing Access Token'
    try:
        api = client.InstagramAPI(access_token=access_token)
        tag_search, next_tag = api.tag_search(q="catband")
        tag_recent_media, next = api.tag_recent_media(tag_name=tag_search[0].name)
        photos = []
        for tag_media in tag_recent_media:
            photos.append('<img src="%s"/>' % tag_media.get_standard_resolution_url())
        content += ''.join(photos)
    except Exception, e:
        print e
Thanks in advance :)
After some help from the comments and other resources, I found out that since I have the URL of each image, I can use it to download the file.
The library used is urllib.
I use a counter variable to save the images, numbered sequentially, in the same directory as the script.
Here is the modified code:
@route('/tag_search')
def tag_search(session):
    access_token = session.get('access_token')
    content = "<h2>Tag Search</h2>"
    if not access_token:
        return 'Missing Access Token'
    try:
        api = client.InstagramAPI(access_token=access_token)
        tag_search, next_tag = api.tag_search(q="selfie")
        tag_recent_media, next = api.tag_recent_media(tag_name=tag_search[0].name)
        photos = []
        count = 0
        for tag_media in tag_recent_media:
            photos.append('<img src="%s"/>' % tag_media.get_standard_resolution_url())
            urllib.urlretrieve(tag_media.get_standard_resolution_url(), str(count) + ".jpg")
            count = count + 1
        content += ''.join(photos)
    except Exception, e:
        print e
Hope this helps :)
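Note that the code above is Python 2 (urllib.urlretrieve and the except Exception, e syntax). For anyone on Python 3, the download step would look like this instead, a sketch of just the loop assuming the same tag_recent_media result:

import urllib.request

for count, tag_media in enumerate(tag_recent_media):
    url = tag_media.get_standard_resolution_url()
    urllib.request.urlretrieve(url, '%d.jpg' % count)  # saves 0.jpg, 1.jpg, ...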
