If no value, do nothing - Python

Here is the code:
import subprocess, os
import xbmcgui
import xbmc
import xbmcaddon
import urllib2
import re
import string

# Initialize values for reference
menu = 0
video = 0
audio = 0
music_title = 0
music_title1 = 1

# Initialize addon
settings = xbmcaddon.Addon(id='micasaverde.addon')

# Initialize addon information
ip = settings.getSetting("vera_ip")
dev = settings.getSetting("vera_dev")

while not xbmc.abortRequested:
    win = xbmcgui.getCurrentWindowId()
    if xbmc.Player().isPlayingAudio():
        tag = xbmc.Player().getMusicInfoTag()
        music_title = tag.getTitle()
        music_title = music_title.replace(' ', '_')
        if music_title != music_title1:
            # the urllib2 module is not callable; urlopen() performs the request
            urllib2.urlopen('http://%s:3480/data_request?id=variableset&DeviceNum=%s&serviceId=urn:upnp-org:serviceId:XBMCState1&Variable=IdleTime&Value=%s' % (ip, dev, music_title))
            music_title1 = music_title
The thing is that sometimes it sends the command with no value (music_title) at the end of the URL, and that crashes the code. How can I send the URL only if there is actually something in it? I tried to search the net, but without any success.
Thanks
Mike

Change:
    if music_title != music_title1:
to:
    if music_title.strip() and music_title != music_title1:
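On top of that (my note, not part of the original answer), getTitle() can return None, and titles may contain characters that are not URL-safe. A slightly more defensive variant of the loop body, assuming Python 2's urllib is available alongside urllib2:

import urllib

title = tag.getTitle() or ''             # guard against None
title = title.strip().replace(' ', '_')
if title and title != music_title1:
    safe_title = urllib.quote(title)     # escape URL-unsafe characters
    urllib2.urlopen('http://%s:3480/data_request?id=variableset&DeviceNum=%s'
                    '&serviceId=urn:upnp-org:serviceId:XBMCState1'
                    '&Variable=IdleTime&Value=%s' % (ip, dev, safe_title))
    music_title1 = title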

Related

API not properly capturing data and timing out

I currently have a script pulling data from Instagram; it looks like the code block posted below. As long as you plug in your Instagram credentials under user_name and password, it should be fully reproducible.
It takes the account listed in player_df, pulls the list of all of their followers on Instagram, and then pulls the bio information of every one of those followers. But when I run it, I get the following error:
ClientConnectionError: timeout The read operation timed out
You can find the entire error log here; I just didn't want to post it in the original question because it would exceed the character limit.
As an attempt to fix this, I added the sleep(300) calls to lessen the stress between API calls, but that doesn't seem to do the trick. What would be the best way to get around this so it doesn't time out while running?
from time import sleep
from instagram_private_api import Client, ClientCompatPatch
from operator import itemgetter
import pandas as pd
import json
import requests
from collections import Counter
import datetime
import os.path

user_name = "XXXXX"
password = "XXXXX"

# log in (the original post never created the client, but the calls below need it)
api = Client(user_name, password)

players = [['hannahkshepherd', '201683404']]
player_df = pd.DataFrame(players, columns=['username', 'userId'])

def pull_followers(username_instagram, userid_instagram):
    followers = []
    combinacao = []
    results = api.user_followers(userid_instagram, rank_token=api.generate_uuid())
    followers.extend(results.get('users', []))
    next_max_id = results.get('next_max_id')
    while next_max_id:
        results = api.user_followers(userid_instagram, rank_token=api.generate_uuid(),
                                     max_id=next_max_id)
        followers.extend(results.get('users', []))
        next_max_id = results.get('next_max_id')
    userid = [f['pk'] for f in followers]
    full_names = [f['full_name'] for f in followers]
    usernames = [f['username'] for f in followers]
    profile_pic_url = [f['profile_pic_url'] for f in followers]
    followers_text = ['follower' for _ in followers]
    following_username = [str(username_instagram) for _ in followers]
    following_userid = [str(userid_instagram) for _ in followers]
    combinacao.extend([list(i) for i in zip(userid, full_names,
                                            usernames, profile_pic_url, followers_text,
                                            following_username, following_userid)])
    combinacao = sorted(combinacao, key=itemgetter(2), reverse=False)
    return combinacao

all_followers = []
for i in range(len(player_df)):
    all_followers += pull_followers(player_df['username'][i], player_df["userId"][i])

def get_bios(followers):
    bios = []
    for follower in followers:
        follower_id = follower[0]
        bios += [[follower_id, api.user_info(follower_id)['user']['biography']]]
    return bios

#sleep(300)
bios = get_bios(all_followers)
#sleep(300)

def print_bios():
    s = ''
    for row in bios:
        s += '\n' + 'user_id: ' + str(row[0]) + ', bio: ' + str(row[1])
    print(s)
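A common mitigation for read timeouts like this (a sketch of a general pattern, not something from the thread) is to retry each API call with exponential backoff rather than relying on fixed sleep(300) pauses, so that a single timeout no longer kills the whole run:

import time

def call_with_retries(fn, *args, **kwargs):
    # try the call up to 5 times, sleeping 2, 4, 8, 16 seconds between attempts
    max_tries = 5
    for attempt in range(max_tries):
        try:
            return fn(*args, **kwargs)
        except Exception:
            if attempt == max_tries - 1:
                raise
            time.sleep(2 ** (attempt + 1))

# example usage inside pull_followers:
# results = call_with_retries(api.user_followers, userid_instagram,
#                             rank_token=api.generate_uuid())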

How can I add threading to my Python code?

Below is my attempt at a username availability checker with proxies. So far it works as intended; the only problem is that it's slow. I tried to implement threads, but it made no difference, as I'm not sure whether I'm doing it right.
I used the concurrent.futures and threading libraries.
Is there a better way to write this kind of program, or are there any other suggestions?
Thanks in advance
import requests
import json
import ctypes
import colorama
from colorama import Fore
from datetime import datetime
import os

os.system("cls")
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
colorama.init()

url = "https://link"
headers = {}  # note: never defined in the original post; fill in as required

def grab_proxies():
    proxylist = []
    with open('proxy.txt', 'r') as prx:
        for proxy in prx.readlines():
            proxylist.append(proxy.rstrip("\n"))
    return proxylist

prlist = grab_proxies()

def grab_usernames():
    userlist = []
    with open('userlist.txt', 'r') as users:
        for user in users.readlines():
            userlist.append(user.rstrip("\n"))
    return userlist

ulist = grab_usernames()

found = 0
pc = 0
uc = 0

for i in range(0, len(prlist)):
    ctypes.windll.kernel32.SetConsoleTitleW("[# Checker] | Counter: %s - Found: %s - Current Proxy: %s - Started at: %s" % (i, found, prlist[pc], current_time))
    try:
        req = requests.post(url, headers=headers,
                            data={"requested_username": ulist[uc], "xsrf_token": "F0kpyvjJgeBtsOk5Gl6Jvg"},
                            proxies={'http': prlist[pc], 'https': prlist[pc]},
                            timeout=2)
        response = req.json()
        if response['reference']['status_code'] == 'TAKEN':
            print(f'{Fore.LIGHTBLACK_EX}[{Fore.LIGHTRED_EX}Taken{Fore.LIGHTBLACK_EX}]{Fore.LIGHTCYAN_EX} {ulist[uc]}')
            uc += 1
        elif response['reference']['status_code'] == 'OK':
            print(f'{Fore.LIGHTBLACK_EX}[{Fore.LIGHTGREEN_EX}Available{Fore.LIGHTBLACK_EX}]{Fore.LIGHTCYAN_EX} {ulist[uc]}')
            with open("found.txt", "a") as f:
                f.write(ulist[uc] + "\n")
            found += 1
            uc += 1
        elif response['reference']['status_code'] == 'INVALID_BEGIN':
            print(f'{Fore.LIGHTBLACK_EX}[{Fore.LIGHTRED_EX}Invalid Username{Fore.LIGHTBLACK_EX}]{Fore.LIGHTCYAN_EX} {ulist[uc]}')
            uc += 1
        elif response['reference']['status_code'] == 'DELETED':
            print(f'{Fore.LIGHTBLACK_EX}[{Fore.LIGHTRED_EX}Deleted{Fore.LIGHTBLACK_EX}]{Fore.LIGHTCYAN_EX} {ulist[uc]}')
            uc += 1
        else:
            print(response)
    except Exception:
        pc += 1  # proxy failed, move on to the next one

x = input("Finished!.. press enter to exit")
You could use https://github.com/encode/requests-async to do your requests in an async way
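For the threading part of the question, here is a minimal sketch using concurrent.futures that reuses url, headers, prlist and ulist from the code above; it is untested and assumes the endpoint tolerates concurrent requests:

from concurrent.futures import ThreadPoolExecutor, as_completed
import requests

def check_username(user, proxy):
    # check one username through one proxy; returns (user, status) or (user, None)
    try:
        req = requests.post(url, headers=headers,
                            data={"requested_username": user,
                                  "xsrf_token": "F0kpyvjJgeBtsOk5Gl6Jvg"},
                            proxies={'http': proxy, 'https': proxy},
                            timeout=2)
        return user, req.json()['reference']['status_code']
    except Exception:
        return user, None  # proxy failed or response was not JSON

# spread the usernames across the proxy list, up to 10 checks in flight at once
with ThreadPoolExecutor(max_workers=10) as pool:
    futures = [pool.submit(check_username, user, prlist[i % len(prlist)])
               for i, user in enumerate(ulist)]
    for future in as_completed(futures):
        user, status = future.result()
        print(user, status)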

YouTube V3 API doesn't sort videos

So I have been using the YouTube API to scrape a channel. Everything was working fine until 3 days ago (03/15/2019), when the results stopped being sorted. It seems that no matter what I put in the order parameter, the results are all the same. Can anyone tell me why it isn't working? Here's the code snippet:
import re
import os
import json
import MySQLdb
from pytube import YouTube
import urllib
import isodate
import sys

def get_all_video_in_channel(channel_id):
    api_key = '<MY KEY>'
    video_url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails&id={}&key={}'
    # order by date -- but the ordering no longer works
    first_url = 'https://www.googleapis.com/youtube/v3/search?key={}&channelId={}&part=snippet,id&order=date&maxResults=50'.format(api_key, channel_id)
    res = []
    url = first_url
    while True:
        inp = urllib.urlopen(url)
        resp = json.load(inp)
        vidIds = []
        for jobject in resp['items']:
            if jobject['id']['kind'] == "youtube#video":
                vidIds.append(jobject['id']['videoId'])
        vidreq = urllib.urlopen(video_url.format(",".join(vidIds), api_key))
        vidres = json.load(vidreq)
        for vidjson in vidres['items']:
            res.append(vidjson)
        if len(res) >= 50:
            break
        try:
            next_page_token = resp['nextPageToken']
            url = first_url + '&pageToken={}'.format(next_page_token)
        except KeyError:
            break
    return res

c_id = 'UCycyxZMoPwg9cuRDMyQE7PQ'
episodes = get_all_video_in_channel(c_id)
Edit: I did some more research, and people say the API is indeed not working properly because of YouTube doing something around removing the New Zealand shooting video, and that it should be working properly again soon.
I recommend reading the answer at https://stackoverflow.com/a/55220182/8327971. This is a known and acknowledged issue on Google's side: https://issuetracker.google.com/issues/128673552.
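Until Google fixes it, one client-side workaround (my suggestion, not from the linked answer) is to sort the collected video resources yourself; snippet.publishedAt is an ISO 8601 timestamp, so plain string comparison already sorts chronologically:

# newest first; timestamps like '2019-03-15T12:00:00.000Z' sort correctly as strings
episodes.sort(key=lambda v: v['snippet']['publishedAt'], reverse=True)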

Azure Storage Python SDK: Uploading a file to Azure blob storage without writing it to my disk

I have a lot of images on my Apache server that I want to move to Azure.
I cannot afford to do it sequentially, so I will add threading afterwards. I can access those images from a given URL and build a list from that. Easy.
Now, I do not have enough disk space to download each image, upload it, and then delete it. I would like something cleaner.
Is there a method to do that? Something like:
    block_blob_service.AZURECOMMAND(container, source_URL, target_blob_name)
If that is not possible, is there a workaround?
Here is the complete code I have today (download and then upload, which is what I want to avoid):
EDIT: Thanks to Gaurav Mantri, I got it now. I have updated the code.
import requests
from bs4 import BeautifulSoup
from os.path import basename
import os
import sys
import urllib
import urllib2
import urlparse
import argparse
import json
import config
import random
import base64
import datetime
import time
import string
from azure.storage import CloudStorageAccount, AccessPolicy
from azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService
from azure.storage.models import CorsRule, Logging, Metrics, RetentionPolicy, ResourceTypes, AccountPermissions
from azure.storage.blob.models import BlobBlock, ContainerPermissions, ContentSettings

CURRENT_DIR = os.getcwd()
STORING_DIRECTORY_NAME = "stroage_scrapped_images"
STORING_DIRECTORY = CURRENT_DIR + "/" + STORING_DIRECTORY_NAME

if not os.path.exists(STORING_DIRECTORY):
    os.makedirs(STORING_DIRECTORY)

def randomword(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))

startdate = time.clock()
metadata_loaded = {'Owner': 'ToBeAddedSoon', 'Date_Of_Upload': startdate, 'VAR_2': 'VAL_VAR_2', 'VAR_3': 'VAL_VAR_3', 'VAR_4': 'VAL_VAR_4'}

with open("credentials.json", 'r') as f:
    data = json.loads(f.read())
    StoAcc_var_name = data["storagacc"]["Accountname"]
    StoAcc_var_key = data["storagacc"]["AccountKey"]
    StoAcc_var_container = data["storagacc"]["Container"]

def copy_azure_files(source_url, destination_object, destination_container):
    blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)
    blob_service.copy_blob(destination_container, destination_object, source_url)

block_blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)

def upload_func(container, blobname, filename):
    start = time.clock()
    block_blob_service.create_blob_from_path(container, blobname, filename)
    elapsed = time.clock() - start
    print "*** DEBUG *** Time spent uploading API ", filename, " is : ", elapsed, " in Bucket/container : ", container

#URL_TARGET = "https://mouradcloud.westeurope.cloudapp.azure.com/blog/blog/category/food/"
URL_TARGET = "https://www.cdiscount.com/search/10/telephone.html"
base_url = URL_TARGET
out_folder = '/tmp'
r = requests.get(URL_TARGET)
data = r.text
soup = BeautifulSoup(data, "lxml")

for link in soup.find_all('img'):
    image_url = link.get("src")
    while image_url is not None:
        if 'http' in image_url:
            if image_url.endswith(('.png', '.jpg', '.jpeg')):
                print " ->>>>>>>>>>>>>> THIS IS AN IMAGE ... PROCESSING "
                file_name_downloaded = basename(image_url)
                file_name_path_local = STORING_DIRECTORY + "/" + file_name_downloaded
                with open(file_name_path_local, "wb") as f:
                    f.write(requests.get(image_url).content)
                filename_in_clouddir = "uploads" + "/" + file_name_downloaded
                #upload_func(StoAcc_var_container, filename_in_clouddir, file_name_path_local)
                copy_azure_files(image_url, filename_in_clouddir, StoAcc_var_container)
                break
            else:
                print " ->>>>>>>>>>>>>> THIS IS NOT AN IMAGE ... SKIPPING "
                break
        else:
            print " ->>>>>>>>>>>>>> THIS IS A LOCAL IMAGE ... SKIPPING "
            break
Indeed, there's something exactly like this: copy_blob
    block_blob_service.copy_blob(container, target_blob_name, source_URL)
Please keep in mind that this copy operation is asynchronous, server-side copying, thus:
The source of the copy must be publicly accessible.
You must wait for the copy operation to finish before deleting the source items.
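For the second point, a minimal sketch of how the wait could look, assuming the same legacy BlockBlobService SDK used above (not tested against a live account):

import time

def wait_for_copy(blob_service, container, blob_name, poll_seconds=1):
    # poll the destination blob until the server-side copy reaches a terminal state
    while True:
        blob = blob_service.get_blob_properties(container, blob_name)
        status = blob.properties.copy.status
        if status != 'pending':
            return status  # 'success', 'failed' or 'aborted'
        time.sleep(poll_seconds)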
UPDATE
Modified code (I have not tried running it)
import requests
from bs4 import BeautifulSoup
from os.path import basename
import os
import sys
import urllib
import urllib2
import urlparse
import argparse
import json
import config
import random
import base64
import datetime
import time
import string
from azure.storage import CloudStorageAccount, AccessPolicy
from azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService
from azure.storage.models import CorsRule, Logging, Metrics, RetentionPolicy, ResourceTypes, AccountPermissions
from azure.storage.blob.models import BlobBlock, ContainerPermissions, ContentSettings

CURRENT_DIR = os.getcwd()
STORING_DIRECTORY_NAME = "stroage_scrapped_images"
STORING_DIRECTORY = CURRENT_DIR + "/" + STORING_DIRECTORY_NAME

if not os.path.exists(STORING_DIRECTORY):
    os.makedirs(STORING_DIRECTORY)

def randomword(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))

startdate = time.clock()
metadata_loaded = {'Owner': 'ToBeAddedSoon', 'Date_Of_Upload': startdate, 'VAR_2': 'VAL_VAR_2', 'VAR_3': 'VAL_VAR_3', 'VAR_4': 'VAL_VAR_4'}

with open("credentials.json", 'r') as f:
    data = json.loads(f.read())
    StoAcc_var_name = data["storagacc"]["Accountname"]
    StoAcc_var_key = data["storagacc"]["AccountKey"]
    StoAcc_var_container = data["storagacc"]["Container"]

block_blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)

def upload_func(container, blobname, sourceurl):
    start = time.clock()
    # server-side copy: no local download needed
    block_blob_service.copy_blob(container, blobname, sourceurl)
    elapsed = time.clock() - start
    print "*** DEBUG *** Time spent uploading API ", blobname, " is : ", elapsed, " in Bucket/container : ", container

#URL_TARGET = "https://mouradcloud.westeurope.cloudapp.azure.com/blog/blog/category/food/"
URL_TARGET = "https://www.cdiscount.com/search/10/telephone.html"
base_url = URL_TARGET
out_folder = '/tmp'
r = requests.get(URL_TARGET)
data = r.text
soup = BeautifulSoup(data, "lxml")

for link in soup.find_all('img'):
    image_url = link.get("src")
    while image_url is not None:
        if 'http' in image_url:
            if image_url.endswith(('.png', '.jpg', '.jpeg')):
                print " ->>>>>>>>>>>>>> THIS IS AN IMAGE ... PROCESSING "
                file_name_downloaded = basename(image_url)
                filename_in_clouddir = "uploads" + "/" + file_name_downloaded
                upload_func(StoAcc_var_container, filename_in_clouddir, image_url)
                break
            else:
                print " ->>>>>>>>>>>>>> THIS IS NOT AN IMAGE ... SKIPPING "
                break
        else:
            print " ->>>>>>>>>>>>>> THIS IS A LOCAL IMAGE ... SKIPPING "
            break

How to loop this so I can add more Twitter accounts

import time
from TwitterAPI import TwitterAPI
import requests
from requests_oauthlib import OAuth1
from urlparse import parse_qs

# application's key and key secret
ck = ''
cs = ''

# obtain request token
oauth = OAuth1(ck, cs)
r = requests.post(url='https://api.twitter.com/oauth/request_token', auth=oauth)
credentials = parse_qs(r.content)
request_key = credentials.get('oauth_token')[0]
request_secret = credentials.get('oauth_token_secret')[0]

# obtain authorization from twitter user
print('Visit this link to authorize the TweetBot:\n https://api.twitter.com/oauth/authorize?oauth_token=%s' % request_key)
verifier = raw_input('Enter your verification code: ')

# obtain access token
oauth = OAuth1(ck, cs, request_key, request_secret, verifier=verifier)
r = requests.get(url='https://api.twitter.com/oauth/access_token', auth=oauth)
credentials = parse_qs(r.content)
tk = credentials.get('oauth_token')[0]
ts = credentials.get('oauth_token_secret')[0]

# access TwitterAPI with the obtained access tokens
api = TwitterAPI(ck, cs, tk, ts)

f = open('tweetbot.txt', 'rU')
for line in f:
    r = api.request('statuses/update', {'status': line})
    print line,
    print r.status_code
    time.sleep(600)
I have this code, but I want it to post from multiple Twitter accounts. I guess I'd need to loop the input and store the credentials in different variables? Please give me the best solution; I'm not that advanced with Python.
Alright @user3392493, what you need is a while loop with several if statements inside and a counter variable, so the variables are set based on what number the counter is at:
enoughaccounts = False
counter = 0
while not enoughaccounts:
    counter += 1
    if counter == 1:
        # Do Everything
        account1 = .....
    if counter == 2:
        # Do Everything
        account2 = .....
    if counter == 3:
        # Do Everything
        account3 = .....
    if counter == 4:
        # Do Everything
        account4 = .....
    moreaccounts = input('Add another account? (type yes or no)')
    if moreaccounts == 'no' or moreaccounts == 'No' or moreaccounts == 'NO':
        enoughaccounts = True
This should do what you want it to, if you need anything else just ask :)
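As a variation on the same idea (my sketch; obtain_tokens() is a hypothetical helper that would wrap the request-token / authorize / access-token steps from the question), a list avoids the numbered account1/account2 variables and works for any number of accounts:

accounts = []
while True:
    # obtain_tokens() is hypothetical: run the OAuth dance from the question
    # and return a (ck, cs, tk, ts) tuple for one account
    accounts.append(obtain_tokens())
    more = raw_input('Add another account? (type yes or no) ')
    if more.lower() == 'no':
        break

apis = [TwitterAPI(ck, cs, tk, ts) for (ck, cs, tk, ts) in accounts]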
