How to search YouTube videos in a channel using the YouTube API - Python

import os

from googleapiclient.discovery import build

# YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION and DEVELOPER_KEY are defined elsewhere

def get_videos(search_keyword):
    youtube = build(YOUTUBE_API_SERVICE_NAME,
                    YOUTUBE_API_VERSION,
                    developerKey=DEVELOPER_KEY)
    try:
        search_response = youtube.search().list(
            q=search_keyword,
            part="id,snippet",
            channelId=os.environ.get("CHANNELID", None),
            maxResults=10,  # max = 50, default = 5, min = 0
        ).execute()
        videos = []
        channels = []
        for search_result in search_response.get("items", []):
            if search_result["id"]["kind"] == "youtube#video":
                title = search_result["snippet"]["title"]
                videoId = search_result["id"]["videoId"]
                channelTitle = search_result["snippet"]["channelTitle"]
                cam_thumbnails = search_result["snippet"]["thumbnails"]["medium"]["url"]
                publishedAt = search_result["snippet"]["publishedAt"]
                channelId = search_result["snippet"]["channelId"]
                data = {'title': title,
                        'videoId': videoId,
                        'channelTitle': channelTitle,
                        'cam_thumbnails': cam_thumbnails,
                        'publishedAt': publishedAt}
                videos.append(data)
            elif search_result["id"]["kind"] == "youtube#channel":
                channels.append("%s (%s)" % (search_result["snippet"]["title"],
                                             search_result["id"]["channelId"]))
    except Exception as e:
        print(e)
I'm using the Python YouTube Data API, and I get video data that is matched by keyword within a specified channel. But I want to get all of the data in the specified channel, not just the results matching the keyword.
How do I get the video data for every video in a specified channel?

I'm not 100% sure I know what you're asking, but I think you're asking how you can get all videos in a channel and not just those related to your keyword? If that's correct, you should just be able to remove:
q=search_keyword,
from your request, and the API should then return all videos in the channel. If you're asking something else, please clarify in your question.
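For instance, the same request without the keyword might look like this (a minimal sketch based on the code in the question; the channel ID still comes from the CHANNELID environment variable, and the other constants are assumed to be defined as above):

search_response = youtube.search().list(
    part="id,snippet",
    channelId=os.environ.get("CHANNELID", None),
    maxResults=50,  # the API caps this at 50 results per page
).execute()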

Related

Can't add tracks to Spotify playlist

I'm having an issue with adding tracks to a playlist. I can obtain the currently playing song and create a playlist if it doesn't already exist, but once I try to add tracks to the playlist it gives me this error:
An error occurred: http status: 403, code:-1
You cannot add tracks to a playlist you don't own., reason: None
import spotipy
from spotipy.oauth2 import SpotifyOAuth

def add_artist_songs_to_playlist():
    sp_oauth = SpotifyOAuth(client_id, client_secret, redirect_uri,
                            scope="app-remote-control user-library-read user-read-playback-state user-read-private user-read-email playlist-read-private playlist-modify-public playlist-modify-private",
                            cache_path=".cache-" + username)
    spotify_api = spotipy.Spotify(auth_manager=sp_oauth)
    artist_name, artist_id = get_artist_info()
    try:
        songs = []
        offset = 0
        while True:
            result = spotify_api.artist_albums(artist_id, offset=offset)
            albums = result['items']
            if not albums:
                break
            for album in albums:
                album_tracks = spotify_api.album_tracks(album['id'])
                for track in album_tracks['items']:
                    songs.append(track['id'])
            offset += 20
        spotify_api.playlist_add_items(playlist_id, songs)
        print(f"Successfully added {len(songs)} songs to the playlist!")
    except spotipy.client.SpotifyException as e:
        print(f"An error occurred: {e}")
How do you get playlist_id? The API is likely not allowing you to modify a playlist that does not belong to the authenticated user.
You can try getting the current user's playlists and see whether the playlist_id you are trying to modify is listed there: https://spotipy.readthedocs.io/en/2.22.1/#spotipy.client.Spotify.current_user_playlists
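A quick check along those lines might look like this (a rough sketch; spotify_api is the authenticated client from the question and playlist_id is the ID you are passing to playlist_add_items):

# list the playlists visible to the authenticated user
playlists = spotify_api.current_user_playlists()
for p in playlists['items']:
    print(p['id'], p['name'], p['owner']['id'])
# if playlist_id is missing from this list, or its owner is a different user,
# playlist_add_items() will fail with the 403 shown above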

How to find channels with youtube API

As you may know, when using the YouTube API to look up a channel you don't always get a match on the exact term. For example, searching for the 'netfactor' channel doesn't return anything because the channel name is actually 'netFactor'. So how can I implement a method that tries all the variations of a word, or that returns all of the channels found?
import os

from googleapiclient.discovery import build

def YoutubeSearch():
    API_KEY = os.environ["API_KEY"]
    youtube = build('youtube', 'v3', developerKey=API_KEY)
    request = youtube.channels().list(
        part="snippet",
        forUsername='netfactor'
    )
    response = request.execute()
    if response['pageInfo']['totalResults'] > 0:
        print(response)
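One way to get looser matching is the search().list endpoint with type='channel', which matches keywords rather than an exact username and is not case-sensitive. A rough sketch, reusing the API_KEY environment variable from the question (the function name and maxResults value are just for illustration):

def search_channels(keyword):
    youtube = build('youtube', 'v3', developerKey=os.environ["API_KEY"])
    response = youtube.search().list(
        part="snippet",
        q=keyword,        # keyword match, case-insensitive
        type="channel",
        maxResults=10
    ).execute()
    for item in response.get("items", []):
        print(item["snippet"]["channelTitle"], item["id"]["channelId"])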

Discord.py slash-command ctx.send()

Hey everyone, I'm having a small issue with slash commands.
The idea is that I fetch an array of asset data, convert each entry into an embed, and then send the embed. If there is an image it will show the image, and if an .mp4 is linked to the asset it will convert the mp4 to a gif and then attach the file locally with file = discord.File(). This all worked just fine before moving to slash commands. For some reason, the slash command wants to send a response before receiving all the data. This is an issue because the gif processing takes time, or if a large amount of data is requested the request takes longer. The function works fine if I have a single asset to display and an image.
So my question is: how do I get ctx.send() to wait?
Thanks in advance, everyone!
import os

from discord.ext import commands
from discord_slash import SlashCommand, SlashContext
from discord_slash.utils.manage_commands import create_option, create_choice

client = commands.Bot(command_prefix="!")
slash = SlashCommand(client, sync_commands=True)

@slash.slash(
    name="assetsearch",
    description="Sends a Message",
    guild_ids=[XXXXXXXXXXXXXXXX],  # Hidden for StackOverflow
    options=[
        create_option(
            name="searchtype",
            description="Please choose how you would like to search for an Asset.",
            required=True,
            option_type=3,
            choices=[
                create_choice(
                    name="Search by Asset Id",
                    value="id"
                ),
                create_choice(
                    name="Search by Collection",
                    value="colec"
                ),
                create_choice(
                    name="Search by Accountname",
                    value="acc"
                ),
                create_choice(
                    name="Search by Asset Name",
                    value="match"
                )
            ]
        ),
        create_option(
            name="searchterm",
            description="Please enter an Asset Id, an Account name, the name of an Asset or the Collection name.",
            required=True,
            option_type=3
        ),
        create_option(
            name="amount",
            description="Please enter how many Assets you would like to display.",
            required=True,
            option_type=3
        ),
    ],
)
async def _assetsearch(ctx: SlashContext, searchtype: str, searchterm: str, amount: str):
    response = await getAssetData(searchtype, searchterm, amount)  # Fetches an array of asset data, async function
    for asset in response:
        nftEmbed, nftFile = await __formatMessage(asset)  # formats the message, converts mp4 from website to gif, returns the gif and embed
        print(nftEmbed, nftFile)
        await ctx.send(embed=nftEmbed, file=nftFile)  # send the embed and gif, ISSUE!!!
        if os.path.isfile('Nft.gif'): os.remove("Nft.gif")
    response = ""
Working function before using slash commands:
if msg.startswith('$search'):
    try:
        searchType = msg.split(",")[1]
    except:
        searchType = 'match'
    try:
        searchTerm = msg.split(",")[2]
    except:
        searchTerm = 'abcd'
    try:
        searchLimit = msg.split(",")[3]
    except:
        searchLimit = '1'
    response = getAssetData(searchType, searchTerm, searchLimit)
    for asset in response:
        nftEmbed, file = __formatMessage(asset)
        await message.channel.send(embed=nftEmbed, file=file)
        if os.path.isfile('Nft.gif'): os.remove("Nft.gif")
    response = ""
I figured it out. Slash commands expect a response within 3 seconds. If you are calling an API in the background and then processing data, that is a deadline you can't meet consistently. This is why you have the bot itself respond directly upon request; once you are done processing your data you can respond normally.
async def _slashcomandfunction(ctx: SlashContext, otherparameters: str):
    message = await ctx.send('Sometext')  # Direct response
    await ctx.channel.send(embed=embed, file=file)  # Actual message
If you don't want the bot's message to stay, have it delete itself:
message = await ctx.send('Sometext', delete_after=1)
...or edit the message the bot sent:
message = await ctx.send('Sometext')
await message.edit(content="newcontent")
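Another way to satisfy the 3-second window with the discord-py-slash-command library used above is to defer the response first and send the real content when it is ready. A minimal sketch, with do_the_slow_work standing in for whatever processing the command actually does:

async def _slashcomandfunction(ctx: SlashContext, otherparameters: str):
    await ctx.defer()  # acknowledges the interaction so Discord keeps waiting
    embed, file = await do_the_slow_work(otherparameters)  # hypothetical long-running helper
    await ctx.send(embed=embed, file=file)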
Can't you just use asyncio?
import asyncio
await asyncio.sleep(number)
Or is this not what you are looking for?

How to scrape 10k records per day from youtube using google API

I am using the Google API to scrape data from YouTube with YouTube Data API v3. Based on a search keyword I am trying to scrape data such as like count, view count, dislike count, etc.
The problem is that by default we are only able to get up to 50 records. I need to get more records, and we can achieve that by using pagination.
On January 11, 2019 Google decreased the daily limit from 1M to 10K. To request 10k records per day we need to use pagination, and I am not sure how to set up the pagination in my code.
from apiclient.discovery import build
import argparse
import csv
import unidecode

DEVELOPER_KEY = "xxxxxxx"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"

def youtube_search(options):
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)
    search_response = youtube.search().list(q=options.q, part="id,snippet", maxResults=options.max_results).execute()
    videos = []
    channels = []
    playlists = []
    csvFile = open('checking_for_no_of_records.csv', 'w')
    csvWriter = csv.writer(csvFile)
    csvWriter.writerow(["title", "videoId", "viewCount", "likeCount", "dislikeCount", "commentCount", "favoriteCount"])
    for search_result in search_response.get("items", []):
        if search_result["id"]["kind"] == "youtube#video":
            title = search_result["snippet"]["title"]
            title = unidecode.unidecode(title)
            videoId = search_result["id"]["videoId"]
            video_response = youtube.videos().list(id=videoId, part="statistics").execute()
            for video_result in video_response.get("items", []):
                viewCount = video_result["statistics"]["viewCount"]
                if 'likeCount' not in video_result["statistics"]:
                    likeCount = 0
                else:
                    likeCount = video_result["statistics"]["likeCount"]
                if 'dislikeCount' not in video_result["statistics"]:
                    dislikeCount = 0
                else:
                    dislikeCount = video_result["statistics"]["dislikeCount"]
                if 'commentCount' not in video_result["statistics"]:
                    commentCount = 0
                else:
                    commentCount = video_result["statistics"]["commentCount"]
                if 'favoriteCount' not in video_result["statistics"]:
                    favoriteCount = 0
                else:
                    favoriteCount = video_result["statistics"]["favoriteCount"]
                csvWriter.writerow([title, videoId, viewCount, likeCount, dislikeCount, commentCount, favoriteCount])
    csvFile.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--q', help='Search term', default='Google')
    parser.add_argument('--max-results', help='Max results', default=50)
    args = parser.parse_args()
    youtube_search(args)
With the above code I'm able to get only 50 records, but I need to get 10K records per day.
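For reference, search.list returns a nextPageToken that can be passed back as pageToken to walk through further pages. A rough sketch of that loop, assuming the same youtube client and constants as in the question (the function name is just for illustration):

def youtube_search_paginated(options):
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)
    page_token = None
    results = []
    while True:
        search_response = youtube.search().list(
            q=options.q,
            part="id,snippet",
            maxResults=50,          # maximum allowed per page
            pageToken=page_token,   # None on the first request
        ).execute()
        results.extend(search_response.get("items", []))
        page_token = search_response.get("nextPageToken")
        if not page_token:
            break                   # no more pages
    return results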

python - YouTube API search().list_next() not working well

I'm using this code, but I get only 50 results. It does not show the next 50 results; that is, it does not seem to use the nextPageToken. Am I doing something wrong, or does search().list_next() not work?
def youtube_search(options):
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                    developerKey=DEVELOPER_KEY)
    req = youtube.search().list(
        q=options.q,
        part="id,snippet",
        maxResults=50,
        channelId="my_channel_id",
        order="date",
        type="video"
    )
    while req:
        res = req.execute()
        for item in res["items"]:
            if item["id"]["kind"] == "youtube#video":
                video_id = item["id"]["videoId"]
                video_title = item["snippet"]["title"]
                video_date = item["snippet"]["publishedAt"]
                print("%s # %s # %s" % (video_id, video_title, video_date))
        req = youtube.search().list_next(req, res)
I'm searching within my own channel using channelId, and I know I have more than 50 results.
maxResults is the number of results per page (set to 50 here).
q=options.q is the query, a word to base the search results on ('dog', for example).
