I am using python-instagram API and I am displaying some images with searched tag!
Rather than displaying, I want to save those images so that it can be used for further analysis.
Is it possible? I am new to python and using API's.
Here is my code snippet which does this:
#route('/tag_search')
def tag_search(session):
access_token = session.get('access_token')
content = "<h2>Tag Search</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token)
tag_search, next_tag = api.tag_search(q="catband")
tag_recent_media, next = api.tag_recent_media(tag_name=tag_search[0].name)
photos = []
for tag_media in tag_recent_media:
photos.append('<img src="%s"/>' % tag_media.get_standard_resolution_url())
content += ''.join(photos)
except Exception, e:
print e
Thanx in advance:)
After some help from comments, and other resources, I found out that since I have URL of the image, I can use it to download!
The library which is used was "urllib"
I used a counter variable to save images in the same directory where the file is and in the form of 1.jpg, 2.jpg and so on and so forth.
Here is the modified code:
#route('/tag_search')
def tag_search(session):
access_token = session.get('access_token')
content = "<h2>Tag Search</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token)
tag_search, next_tag = api.tag_search(q="selfie")
tag_recent_media, next = api.tag_recent_media(tag_name=tag_search[0].name)
photos = []
count = 0
for tag_media in tag_recent_media:
photos.append('<img src="%s"/>' % tag_media.get_standard_resolution_url())
urllib.urlretrieve(tag_media.get_standard_resolution_url(), `count`+".jpg")
count = count + 1
content += ''.join(photos)
except Exception, e:
print e
Hope this Helps:)
Related
The code below is what I use to upload a profile picture but I get the following error:
def get_image_bytes():
file_size = os.path.getsize('./image/example.jpg')
return file_size
def get_image_raw():
with open("./image/example.jpg", "rb") as img_file:
enc_image = img_file.read()
return enc_image
def get_media_id():
file_bytes = get_image_bytes();
resp = httpx.post(f'https://upload.twitter.com/i/media/upload.json?command=INIT&total_bytes={file_bytes}&media_type=image/jpeg', headers=general_headers)
return resp.json()['media_id_string']
def append_image():
media_id = get_media_id()
resp = httpx.post(f'https://upload.twitter.com/i/media/upload.json?command=APPEND&media_id={media_id}&segment_index=0&media={get_image_raw()}', headers=webkit_headers)
return media_id
def update_profile():
media_id = append_image()
resp = httpx.post(f'https://upload.twitter.com/i/media/upload.json?command=FINALIZE&media_id={media_id}', headers=general_headers)
print(resp.json())
update_profile()
Error:
{'request': '/i/media/upload.json', 'error': 'Segments do not add up to provided total file size.'}
I don't know how twitter wants to receive the image binary.
I've heard word that they expect chunks but I've tried almost everything.
When uploading a new profile picture I do see that they crop the image maybe this can be a reason why but I am to no avail at the moment.
My image is under 5mb by the way.
I am new to categories in youtube api in Python and I'm encountering issues regarding the language of the videos after I select the region to find videos in.
The problem that I am encountering is that when I enter a region code, it comments on videos that aren't in English even though it's meant to be.
E.G: I enter the region code 'US' and the outcome is what I have attached. It comments on videos that are in a different language.
result [![enter image description here][1]][1]
I have tried to change the region_code="US" in the script but it has problems with 'US' not being defined.
Does anyone know how I can get around this problem or what I'm doing wrong? Thanks
API_KEY = "key"
# This function loads the comments in Comments.txt file
def load_comments(file):
comments = []
f = open(file, 'r', encoding='utf8')
for comment in f:
comments.append(comment)
return comments
def search_video(keyword, region_code, comments):
# Fucntion from Library
from apiclient.discovery import build
import datetime
import time
def get_category_id(youtube, cat):
req = youtube.videoCategories().list(part='snippet', regionCode=region_code)
response = req.execute()
items_list = response["items"]
for item in items_list:
video_category_id = item.get("id")
if video_category_id is not None:
return video_category_id
def search(youtube, video_category_id=None):
if video_category_id is None:
video_category_id = get_category_id(youtube, keyword)
req = youtube.search().list(videoCategoryId=video_category_id, order='date', maxResults=len(comments), # q=keyword,
publishedAfter=(datetime.datetime.utcnow() +
datetime.timedelta(minutes = -1)).isoformat('T')+'Z',
part='snippet',type='video')
return req, video_category_id
api_key = API_KEY
youtube = build('youtube', 'v3', developerKey=api_key)
req, video_category_id = search(youtube)
res = req.execute()
while len(res['items']) == 0:
time.sleep(10)
req, _ = search(youtube, video_category_id) # re-use category id if already found to prevent lag
res = req.execute()
videoid = [res['items'][i]['id']['videoId'] for i in range(len(res['items']))]
return videoid
(It asks what region to use btw)
I am trying to return a directory of images from the server to the client. I'm having a hard time understanding how to format my response so that the client is able to distinguish between the files.
What is the best practice way to accomplish what I am doing? my photos are kept in a tmp folder within the application. Here is my utils.py:
def get_multipart_fields():
final = {}
for file in os.listdir(os.path.dirname(os.path.realpath(__file__)) + '/tmp'):
try:
with open(os.path.dirname(os.path.abspath(__file__)) + '/tmp/' + os.path.join(file), "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
final[os.path.join(file)] = { 'filename': encoded_string }
except Exception as e:
print e
return final.to_string()
and my views.py:
#app.route('/download-files', methods=['GET'])
def gallery():
client = get_client()
resource = get_resource()
download_dir(client, resource, 'test-bucket')
file_names = get_multipart_fields()
m = MultipartEncoder(
fields=file_names
)
return Response(json.dumps(m), mimetype=m.content_type)
Where am I going wrong with this logic ? Any help would be greatly appreciated.
I'm pretty new to coding and have been trying some things out. I am getting this error when I run a python script I have. I have read that this error is because something is returning "None" but I'm having trouble figuring out what is causing it (still trying to learn all of this).
The purpose of the script is pulling thumbnails from videos and searching the internet for other instances of the same thing. After running my python script, it returns a result of:
[*] Retrieving video ID: VIDEOID
Traceback (most recent call last):
File "VidSearch.py", line 40, in <module>
thumbnails = video_data['items'][0]['snippet']['thumbnails']
TypeError: 'NoneType' object has no atribute '_getitem_'
The following is the script I am running (Youtube Key removed):
import argparse
import requests
import json
from pytineye import TinEyeAPIRequest
tineye = TinEyeAPIRequest('http://api.tineye.com/rest/','PUBLICKEY','PRIVATEKEY')
youtube_key = "VIDEOID"
ap = argparse.ArgumentParser()
ap.add_argument("-v","--videoID", required=True,help="The videoID of the YouTube video. For example: https://www.youtube.com/watch?v=VIDEOID")
args = vars(ap.parse_args())
video_id = args['videoID']
#
# Retrieve the video details based on videoID
#
def youtube_video_details(video_id):
api_url = "https://www.googleapis.com/youtube/v3/videos?part=snippet%2CrecordingDetails&"
api_url += "id=%s&" % video_id
api_url += "key=%s" % youtube_key
response = requests.get(api_url)
if response.status_code == 200:
results = json.loads(response.content)
return results
return None
print "[*] Retrieving video ID: %s" % video_id
video_data = youtube_video_details(video_id)
thumbnails = video_data['items'][0]['snippet']['thumbnails']
print "[*] Thumbnails retrieved. Now submitting to TinEye."
url_list = []
# add the thumbnails from the API to the list
for thumbnail in thumbnails:
url_list.append(thumbnails[thumbnail]['url'])
# build the manual URLS
for count in range(4):
url = "http://img.youtube.com/vi/%s/%d.jpg" % (video_id,count)
url_list.append(url)
results = []
# now walk over the list of URLs and search TinEye
for url in url_list:
print "[*] Searching TinEye for: %s" % url
try:
result = tineye.search_url(url)
except:
pass
if result.total_results:
results.extend(result.matches)
result_urls = []
dates = {}
for match in results:
for link in match.backlinks:
if link.url not in result_urls:
result_urls.append(link.url)
dates[link.crawl_date] = link.url
print
print "[*] Discovered %d unique URLs with image matches." % len(result_urls)
for url in result_urls:
print url
oldest_date = sorted(dates.keys())
print
print "[*] Oldest match was crawled on %s at %s" % (str(oldest_date[0]),dates[oldest_date[0]])
I know it's probably something simple but I can't seem to figure it out for the life of me. Any help would be greatly appreciated.
In your youtube_video_details method. the response.status_code maybe is not 200, so the method return the None
So you can do like this:
video_data = youtubo_video_details(video_id)
if not video_data:
thumbnails = video_data['items'][0]['snippet']['thumbnails']
I'm a complete beginner in Python, trying to get a script to work, but I'm a little at loss on where it goes wrong. From reading other posts it seems result hasn't been mentioned before and it doesn't know how to deal with the results.
I'm running Python 2.7.11 on EL Capitan
python ytc.py -v YQHsXMglC9A [*] Retrieving video ID: YQHsXMglC9A [*]
Thumbnails retrieved. Now submitting to TinEye. [*] Searching TinEye
for: https://i.ytimg.com/vi/YQHsXMglC9A/default.jpg Traceback (most
recent call last): File "ytc.py", line 72, in <module>
if result.total_results: NameError: name 'result' is not defined
This is the script:
import argparse
import requests
import json
from pytineye import TinEyeAPIRequest
tineye = TinEyeAPIRequest('http://api.tineye.com/rest/','PUBLICKEY','PRIVATEKEY')
youtube_key = "MY-API"
ap = argparse.ArgumentParser()
ap.add_argument("-v","--videoID", required=True,help="The videoID of the YouTube video. For example: https://www.youtube.com/watch?v=VIDEOID")
args = vars(ap.parse_args())
video_id = args['videoID']
#
# Retrieve the video details based on videoID
#
def youtube_video_details(video_id):
api_url = "https://www.googleapis.com/youtube/v3/videos?part=snippet%2CrecordingDetails&"
api_url += "id=%s&" % video_id
api_url += "key=%s" % youtube_key
response = requests.get(api_url)
if response.status_code == 200:
results = json.loads(response.content)
return results
return None
print "[*] Retrieving video ID: %s" % video_id
video_data = youtube_video_details(video_id)
thumbnails = video_data['items'][0]['snippet']['thumbnails']
print "[*] Thumbnails retrieved. Now submitting to TinEye."
url_list = []
# add the thumbnails from the API to the list
for thumbnail in thumbnails:
url_list.append(thumbnails[thumbnail]['url'])
# build the manual URLS
for count in range(4):
url = "http://img.youtube.com/vi/%s/%d.jpg" % (video_id,count)
url_list.append(url)
results = []
# now walk over the list of URLs and search TinEye
for url in url_list:
print "[*] Searching TinEye for: %s" % url
try:
result = tineye.search_url(url)
except:
pass
if result.total_results:
results.extend(result.matches)
result_urls = []
dates = {}
for match in results:
for link in match.backlinks:
if link.url not in result_urls:
result_urls.append(link.url)
dates[link.crawl_date] = link.url
print
print "[*] Discovered %d unique URLs with image matches." % len(result_urls)
for url in result_urls:
print url
oldest_date = sorted(dates.keys())
print
print "[*] Oldest match was crawled on %s at %s" % (str(oldest_date[0]),dates[oldest_date[0]])
If the try-except fails, it will execute the except block which has only pass, no assignment of variable result, so if that's the case, on if result.total_results, you are referencing an object which does not exist.
This should be a quick fix
try:
result = tineye.search_url(url)
except NameError:
print 'Nothing Found !'
break
if result.total_results:
results.extend(result.matches)
The error is clear, result variable is used while its not defined. this happen in the case of failure in your try except instruction.
Fix it by moving the instruction into the try block :
for url in url_list:
print "[*] Searching TinEye for: %s" % url
try:
result = tineye.search_url(url)
if result.total_results:
results.extend(result.matches)
except:
pass