As you may know, when using the YouTube API to search for a channel you don't always get the right result. For example, searching for the 'netfactor' channel returns nothing because the channel name is actually 'netFactor'. How can I implement a method that searches for all similar spellings of a word, or make my method return all the channels found?
def YoutubeSearch(username='netfactor'):
    """Look up a YouTube channel by its legacy username and print the match.

    Args:
        username: the legacy YouTube username to resolve (default keeps the
            original hard-coded 'netfactor' behavior).

    Returns:
        The raw API response dict, so callers can inspect the result instead
        of relying on stdout.
    """
    API_KEY = os.environ["API_KEY"]
    # BUG FIX: the original passed the literal string 'API_KEY' as the
    # developer key instead of the variable holding the real key, so every
    # request was sent with an invalid credential.
    youtube = build('youtube', 'v3', developerKey=API_KEY)
    request = youtube.channels().list(
        part="snippet",
        forUsername=username,
    )
    response = request.execute()
    # channels().list with forUsername returns totalResults == 0 when no
    # channel has that exact legacy username; a fuzzy match would need
    # search().list(q=..., type='channel') instead.
    if response['pageInfo']['totalResults'] > 0:
        print(response)
    return response
Related
I'm trying to fetch all YouTube videos with a location near Amsterdam Airport that were made in 2012; however, I'm only getting 7 videos in the response. I know there are far more YouTube videos made in 2012 near this location.
Does anybody know why I'm not getting all the results? Thanks!
# API client library
import googleapiclient.discovery

# API information
api_service_name = "youtube"
api_version = "v3"
DEVELOPER_KEY = ''

# API client
youtube = googleapiclient.discovery.build(
    api_service_name, api_version, developerKey=DEVELOPER_KEY)

# Page through the search results.  nextPageToken is requested via the
# 'fields' parameter so we know when more pages are available.
NewPageToken = ""
while True:
    # Build the request parameters once.  The two original branches were
    # copy-pasted and silently disagreed on publishedBefore (2012-12-05 in
    # one branch, 2012-12-31 in the other) — unified here to 2012-12-31.
    params = dict(
        part="id,snippet",
        type='video',
        publishedAfter='2012-01-05T11:10:04Z',
        publishedBefore='2012-12-31T11:10:04Z',
        location="52.3076865, 4.767424099999971",
        locationRadius='1km',
        # Was maxResults=1: that costs one full search-quota unit per single
        # video.  50 is the API maximum per page.
        maxResults=50,
        fields="nextPageToken,items(id(videoId),snippet(publishedAt,channelId,channelTitle,title,description))",
    )
    if NewPageToken:
        print('request with token')
        params['pageToken'] = NewPageToken
    else:
        print('request with no token')
    request = youtube.search().list(**params)
    response = request.execute()
    # BUG FIX: print before checking for the token — the original printed
    # after the break, so the final page was fetched but never shown.
    print(response)
    if "nextPageToken" in response:
        print('foundToken fetching next data')
        NewPageToken = response['nextPageToken']
    else:
        print('noTokenFound Stopping your script')
        break
I want to download an image with Pexels API (documentation) with Python. First, I get the ID of the picture by doing:
import requests

# Search Pexels for candidate images.
image_base_url = 'https://api.pexels.com/v1/search'
api_key = 'my_api_key'
my_obj = {'query': 'Stock market'}
# BUG FIX: query-string arguments of a GET request belong in `params=`.
# The original used `data=`, which puts them in the request *body*, where
# the search endpoint ignores them.
x = requests.get(image_base_url, headers={'Authorization': api_key}, params=my_obj)
print(x.text)
Then, I obtain an ID for the image I want and run this:
# BUG FIX (cause of the 404): the photo id is part of the URL *path*
# (/v1/photos/<id>), not a body or query parameter, so it must be appended
# to the URL rather than sent via `data=`.
photo_id = 159888
photo_request_link = f'https://api.pexels.com/v1/photos/{photo_id}'
final_photo = requests.get(photo_request_link, headers={'Authorization': api_key})
print(final_photo)
But get 404 Error as a result. Any idea why?
The id is a part of the URL path, not the querystring. See the example in the documentation (https://api.pexels.com/v1/photos/2014422). You should append it to the URL, for example:
photo_id = 2014422  # example id from the Pexels documentation
# BUG FIX: the original interpolated `{id}`, i.e. the *builtin* `id`
# function, producing "<built-in function id>" in the URL.  Interpolate a
# variable holding the photo id instead.
photo_request_link = f'https://api.pexels.com/v1/photos/{photo_id}'
I want to know if there is some way through which I can obtain all the Intents(its corresponding questions), Entities and the training data(I defined in Google Dialogflow) programmatically using python.
Following is the code(works fine) through which i get a response from dialogflow.
GOOGLE_APPLICATION_CREDENTIALS = '###'  # path to the service-account JSON key
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = GOOGLE_APPLICATION_CREDENTIALS


def dialogflow_api_response(text):
    """Send `text` to Dialogflow detect-intent and return the response.

    Args:
        text: the end-user utterance to classify.

    Returns:
        The detect-intent response converted to a plain dict.

    BUG FIX: the original parsed the response into `x` but never returned
    it, so every caller received None.
    """
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path('###', '##')  # (project_id, session_id)
    text_input = dialogflow.types.TextInput(text=text, language_code='en')
    query_input = dialogflow.types.QueryInput(text=text_input)
    response = session_client.detect_intent(session=session, query_input=query_input)
    # detect_intent returns a protobuf message; round-trip through JSON to
    # get a plain dict.
    jsonObj = MessageToJson(response)
    x = json.loads(jsonObj)
    return x
If there is, please point me in the right direction.
Thank you
Yes, it is possible to get all the intents, entities, user entities and contexts from a Dialogflow agent as JSON objects using the Dialogflow API. Here's the code to get the list of intents:
def list_intents(project_id):
    """Print every intent of the Dialogflow agent for `project_id`.

    For each intent this prints its name, display name, action, followup
    intents, input/output contexts and all training phrases.

    Args:
        project_id: the GCP project id that owns the Dialogflow agent.
    """
    import dialogflow_v2 as dialogflow
    intents_client = dialogflow.IntentsClient()
    parent = intents_client.project_agent_path(project_id)
    intents = intents_client.list_intents(parent)
    for intent in intents:
        print('=' * 20)
        print('Intent name: {}'.format(intent.name))
        print('Intent display_name: {}'.format(intent.display_name))
        print('Action: {}\n'.format(intent.action))
        print('Root followup intent: {}'.format(
            intent.root_followup_intent_name))
        print('Parent followup intent: {}\n'.format(
            intent.parent_followup_intent_name))
        print('Input contexts:')
        for input_context_name in intent.input_context_names:
            print('\tName: {}'.format(input_context_name))
        print('Output contexts:')
        for output_context in intent.output_contexts:
            print('\tName: {}'.format(output_context.name))
        for training_phrase in intent.training_phrases:
            # BUG FIX: the original printed only parts[0].text, which drops
            # the rest of any phrase containing annotated entities (those
            # are split across multiple parts) and raises IndexError when
            # `parts` is empty.  Join all parts instead.
            print(''.join(part.text for part in training_phrase.parts))
For further information you can refer to the official Dialogflow GitHub repository here.
I'm having fun with the FB Graph API collecting "reactions" until I hit the FB limit of 100. (Some of the posts I need to query have well over 1000 reactions)
I do see dictionary key "next" in the json response which is a link to the next group and that group has a next key and so on. Below is a simplified version of what I have so far...
post_id_list = ['387990201279405_1155752427836508']  # short list for this example

def make_post_reaction_url_list(postid_list, APP_ID, APP_SECRET):
    """Build one Graph API URL per post id, each fetching up to 3 reactions.

    The app access token is formed as "<APP_ID>|<APP_SECRET>".
    Returns the list of request URLs, in the same order as `postid_list`.
    """
    base = "https://graph.facebook.com/v2.8/"
    token = APP_ID + "|" + APP_SECRET
    return [
        base + post_id + "?fields=reactions.limit(3)&access_token=" + token
        for post_id in postid_list
    ]
post_id_reaction_query_list = make_post_reaction_url_list(post_id_list, APP_ID, APP_SECRET)

def return_json_from_fb_query(post_id_rection_query_list):
    """GET each Graph API URL and return the decoded JSON responses.

    Args:
        post_id_rection_query_list: list of request URLs to fetch.
            (Parameter name kept, typo and all, for backward compatibility
            with any keyword callers.)

    Returns:
        A list of dicts, one decoded JSON body per URL, in input order.

    BUG FIX: the original loop iterated the *global*
    post_id_reaction_query_list, silently ignoring the argument passed in —
    the function now uses its parameter.
    """
    list_o_json = []
    for target in post_id_rection_query_list:
        t = requests.get(target)
        list_o_json.append(t.json())
    return list_o_json

list_o_json = return_json_from_fb_query(post_id_reaction_query_list)
list_o_json[0]['reactions']['data']  # gives me information for the response
list_o_json[0]['reactions']['paging']['next']  # returns a http link to the next set of reactions.
Any suggestions how I can collect then follow the "next" link, collect info then follow the next link etc. to the end of the node?
def get_videos(search_keyword):
    """Search one YouTube channel for videos matching `search_keyword`.

    The channel is taken from the CHANNELID environment variable (when
    unset, the search is global).

    Args:
        search_keyword: the query string passed to the search endpoint.

    Returns:
        (videos, channels): `videos` is a list of dicts with title, videoId,
        channelTitle, cam_thumbnails and publishedAt; `channels` is a list
        of "title (channelId)" strings.  The original collected both but
        returned nothing — returning them is backward compatible for
        callers that ignored the old implicit None.
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME,
                    YOUTUBE_API_VERSION,
                    developerKey=DEVELOPER_KEY)
    videos = []
    channels = []
    try:
        search_response = youtube.search().list(
            q=search_keyword,
            part="id,snippet",
            channelId=os.environ.get("CHANNELID", None),
            maxResults=10,  # max = 50, default = 5, min = 0
        ).execute()
        for search_result in search_response.get("items", []):
            kind = search_result["id"]["kind"]
            snippet = search_result["snippet"]
            if kind == "youtube#video":
                videos.append({
                    'title': snippet["title"],
                    'videoId': search_result["id"]["videoId"],
                    'channelTitle': snippet["channelTitle"],
                    'cam_thumbnails': snippet["thumbnails"]["medium"]["url"],
                    'publishedAt': snippet["publishedAt"],
                })
            elif kind == "youtube#channel":
                channels.append("%s (%s)" % (snippet["title"],
                                             search_result["id"]["channelId"]))
    except Exception as e:
        # BUG FIX: `print e` is Python-2 syntax and a SyntaxError on
        # Python 3 (the surrounding file uses f-strings, i.e. Python 3).
        print(e)
    return videos, channels
Now, I'm using the Python YouTube Data API, and I get video data found by keyword search within a specified channel. But I want to get all the data in that channel, not just what matches the keyword.
How do I get the YouTube video data for a specified channel? The data I want must be all the data in that channel.
I'm not 100% sure I know what you're asking, but I think you're asking how you can get all videos in a channel and not just those related to your keyword? If that's correct, you should just be able to remove:
q=search_keyword,
from your request, and the API should then return all videos in the channel. If you're asking something else, please clarify in your question.