Importing dictionaries from a file in Python 3.6

I'm trying to make a bot in Python to manage some tasks on Twitter using tweepy.
I'm saving the auth credentials in several files, but I want a single file. Here is an example:
for x in range(0, 4):
    if x == 0: from keysaccount1 import keys
    if x == 1: from keysaccount2 import keys
    if x == 2: from keysaccount3 import keys
    if x == 3: from keysaccount4 import keys
    CONSUMER_KEY = keys['consumer_key']
    CONSUMER_SECRET = keys['consumer_secret']
    ACCESS_TOKEN = keys['access_token']
    ACCESS_TOKEN_SECRET = keys['access_token_secret']
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth, wait_on_rate_limit=True)
Each file storing the data has this format; for example, keysaccount1:
keys = dict(
    consumer_key = 'xxxxxxxxxxxxx',
    consumer_secret = 'xxxxxxxxxxxxx',
    access_token = 'xxxxxxxxxxxxx',
    access_token_secret = 'xxxxxxxxxxxxx',
)
Is there a way to save all of this in a single file, since I already have the x variable in the loop that I could use to choose which account I'm managing?

You can use a single dictionary:
auth_data = {
    0: {
        'consumer_key': 'xxxxxxxxxx',
        'consumer_secret': 'xxxxxxxxxxxxx',
        'access_token': 'xxxxxxxxxx',
        'access_token_secret': 'xxxxxxxxxxxxxx',
    },
    ...
}
You can then access the fields of a particular account like this:
for x in auth_data:
    keyset = auth_data[x]
    auth = tweepy.OAuthHandler(keyset['consumer_key'], keyset['consumer_secret'])
    auth.set_access_token(keyset['access_token'], keyset['access_token_secret'])
    api = tweepy.API(auth, wait_on_rate_limit=True)
To persist the data, you can use the Python module pickle (note that pickle files must be opened in binary mode, and pickle.dump takes the object first, then the file):
import pickle

def read():
    with open('data/keys.p', 'rb') as f:
        return pickle.load(f)

def write(data):
    with open('data/keys.p', 'wb') as f:
        pickle.dump(data, f)

# Read data
auth_data = read()
# Write data
write(auth_data)
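Pickle files aren't human-editable, so as an alternative sketch you could keep the same auth_data dictionary in JSON instead (the data/keys.json path is just an assumption; note that JSON turns the integer account keys into strings, so they are converted back on read):
import json

def read_json():
    with open('data/keys.json') as f:
        # JSON object keys are strings, so convert them back to ints
        return {int(k): v for k, v in json.load(f).items()}

def write_json(data):
    with open('data/keys.json', 'w') as f:
        json.dump(data, f, indent=2)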

Related

Determine when to end and restart(loop) twitter scraping
Hello, this is Twitter scraping code that collects tweets containing given keywords.
What I want to do is end the crawl after 10 hours, then restart it so that new results accumulate onto the current output.
I'm leaving this note to ask for advice on how to do this.
import tweepy
import time
import os
import json
import simplejson

search_term = 'word1'
search_term2 = 'word2'
search_term3 = 'word3'
lat = "xxxx"
lon = "xxxx"
radius = "xxxx"
location = "%s,%s,%s" % (lat, lon, radius)

API_key = "xxxx"
API_secret = "xxxx"
Access_token = "xxxx"
Access_token_secret = "xxxx"

auth = tweepy.OAuthHandler(API_key, API_secret)
auth.set_access_token(Access_token, Access_token_secret)
api = tweepy.API(auth)

c = tweepy.Cursor(api.search,
                  q="{}+OR+{}+OR+{}".format(search_term, search_term2, search_term3),
                  rpp=1000,
                  geocode=location,
                  include_entities=True)

data = {}
i = 1
for tweet in c.items():
    data['text'] = tweet.text
    print(i, ":", data)
    i += 1
    time.sleep(1)

wfile = open(os.getcwd() + "/workk2.txt", mode='w')
data = {}
i = 0
for tweet in c.items():
    data['text'] = tweet.text
    wfile.write(data['text'] + '\n')
    i += 1
wfile.close()
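A minimal sketch of one approach, assuming the cursor c from the code above: compute a deadline with time.time(), stop the loop once it passes, and open the output file in append mode so each restart accumulates onto the existing output.
import os
import time

RUN_SECONDS = 10 * 60 * 60           # stop after 10 hours
deadline = time.time() + RUN_SECONDS

# mode='a' appends, so rerunning the script adds to the existing file
with open(os.getcwd() + "/workk2.txt", mode='a') as wfile:
    for tweet in c.items():
        if time.time() > deadline:
            break                    # 10 hours elapsed; end this crawl
        wfile.write(tweet.text + '\n')
        time.sleep(1)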

python_" .txt" files can not be created

" .txt " files can not be created.
The code has been created, but the file is not created.
I've been advised to use " pickle ".
But I don't know how to use " pickle. "
How can I use this code to save it as a file
Also, I would like to place the number in order to save.
import tweepy
import time
import os
import json
import simplejson

search_term = 'word1'
search_term2 = 'word2'
search_term3 = 'word3'
lat = "xxxx"
lon = "xxxx"
radius = "xxxx"
location = "%s,%s,%s" % (lat, lon, radius)

API_key = "xxxx"
API_secret = "xxxx"
Access_token = "xxxx"
Access_token_secret = "xxxx"

auth = tweepy.OAuthHandler(API_key, API_secret)
auth.set_access_token(Access_token, Access_token_secret)
api = tweepy.API(auth)

c = tweepy.Cursor(api.search,
                  q="{}+OR+{}+OR+{}".format(search_term, search_term2, search_term3),
                  rpp=1000,
                  geocode=location,
                  include_entities=True)

data = {}
i = 1
for tweet in c.items():
    data['text'] = tweet.text
    print(i, ":", data)
    i += 1
    time.sleep(1)

wfile = open(os.getcwd() + "/workk2.txt", mode='w')
data = {}
i = 0
for tweet in c.items():
    data['text'] = tweet.text
    wfile.write(data['text'] + '\n')
    i += 1
wfile.close()
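A minimal sketch of how pickle could be used here, assuming the cursor c from the code above (the results.p file name is an assumption): number the tweets as they arrive, then dump the dictionary in binary mode.
import pickle

results = {}
for i, tweet in enumerate(c.items(), start=1):
    results[i] = tweet.text          # keyed by arrival order

# pickle needs binary mode: 'wb' to write, 'rb' to read back
with open('results.p', 'wb') as f:
    pickle.dump(results, f)

with open('results.p', 'rb') as f:
    loaded = pickle.load(f)
print(loaded.get(1))                 # first saved tweet, if any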

TypeError: tuple indices must be integers or slices, not str (Python sentiment tweets)

I am trying to capture live tweets, access their contents using the json library, create new objects, and append them to a list.
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import tweepy
import json
import urllib.parse
from urllib.request import urlopen

# Variables that contain the user credentials to access the Twitter API
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
sentDexAuth = ''

auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

def sentimentAnalysis(text):
    encoded_text = urllib.parse.quote(text)
    API_Call = 'http://sentdex.com/api/api.php?text=' + encoded_text + '&auth=' + sentDexAuth
    output = urlopen(API_Call).read()
    return output

class StdOutListener(StreamListener):
    def __init__(self):
        self.tweet_data = []

    def on_data(self, data):
        tweet = json.loads(data)
        for x in tweet.items():
            sentimentRating = sentimentAnalysis(x['text'])
            actualtweets = {
                'created_at': x['created_at'],
                'id': x['id'],
                'tweets': x['text'] + sentimentRating
            }
            self.tweet_data.append(actualtweets)
            with open('rawtweets2.json', 'w') as out:
                json.dump(self.tweet_data, out)
        print(tweet)

l = StdOutListener()
stream = Stream(auth, l)
keywords = ['iknow']
stream.filter(track=keywords)
I believe that I am accessing the JSON objects correctly, but I am not sure about this error. I need the value to be a string for my sentiment function to work, and I am getting a type error:
sentimentRating = sentimentAnalysis(x['text'])
TypeError: tuple indices must be integers or slices, not str
Here,
for x in tweet.items():
    sentimentRating = sentimentAnalysis(x['text'])
x is a (key, value) tuple from your dictionary, so you have to pass a numeric index.
If you simply want the data under the 'text' key, you can write tweet['text'] directly, without the loop.
Your problem is that x is a tuple, so you need to use numeric indexes like x[1], not strings.
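Putting the first answer into practice, a minimal sketch of a corrected on_data for the listener above (only the indexing and a bytes decode change; the rest comes from the question):
def on_data(self, data):
    tweet = json.loads(data)
    # tweet is already a dict, so index it directly instead of looping over items()
    # urlopen().read() returns bytes, so decode before concatenating with str
    sentimentRating = sentimentAnalysis(tweet['text']).decode('utf-8')
    actualtweets = {
        'created_at': tweet['created_at'],
        'id': tweet['id'],
        'tweets': tweet['text'] + sentimentRating
    }
    self.tweet_data.append(actualtweets)
    with open('rawtweets2.json', 'w') as out:
        json.dump(self.tweet_data, out)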

Every 1 minute, generate a report based only on the data tweeted in last 5 minutes

My code streams data continuously, but I want to restrict the report to the data tweeted in the last five minutes, and produce that report every minute. What do I need to do for that?
try:
    import json
except ImportError:
    import simplejson as json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream

ACCESS_TOKEN = 'secret'
ACCESS_SECRET = 'secret'
CONSUMER_KEY = 'secret'
CONSUMER_SECRET = 'secret'

oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_stream = TwitterStream(auth=oauth)
iterator = twitter_stream.statuses.filter(track="car", language="en")

hashtags = []
for tweet in iterator:
    try:
        if 'text' in tweet:
            print(tweet['user']['name'])
            print(tweet['user']['statuses_count'])
            # print('\n')
            for hashtag in tweet['entities']['hashtags']:
                hashtags.append(hashtag['text'])
            print(hashtags)
    except:
        continue
Thanks in advance.
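A minimal sketch of one approach, assuming the iterator from the question: keep (timestamp, hashtag) pairs in a collections.deque, evict entries older than five minutes, and print a report once a minute (the report here is just a hashtag count, as an illustration).
import time
from collections import deque, Counter

WINDOW = 5 * 60        # keep only the last five minutes of data
REPORT_EVERY = 60      # report once a minute

recent = deque()       # (timestamp, hashtag) pairs, oldest first
next_report = time.time() + REPORT_EVERY

for tweet in iterator:
    if 'text' not in tweet:
        continue
    now = time.time()
    for hashtag in tweet['entities']['hashtags']:
        recent.append((now, hashtag['text']))
    # Drop entries that fell out of the five-minute window
    while recent and recent[0][0] < now - WINDOW:
        recent.popleft()
    if now >= next_report:
        print(Counter(tag for _, tag in recent).most_common(10))
        next_report = now + REPORT_EVERY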

Using Python and OAuth2 with Twitter streaming API

The Twitter v1 API is now defunct, so I've been trying to use the Search and Streaming APIs to collate hashtag information. The Search API is rate limited, so if there are a lot of entries for a hashtag you will probably miss some. Streaming seemed like the way to go.
Using OAuth2, here is my (anonymized) code:
import oauth2 as oauth
import json
consumer_key = "<consumer key from twitter developer site>"
consumer_secret = "<consumer secret>"
oauth_token = "<access token>"
oauth_token_secret = "<access token secret>"
consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
access_token = oauth.Token(key=oauth_token, secret=oauth_token_secret)
client = oauth.Client(consumer, access_token)
terms = json.dumps({'track' : 'twitter'})
stream_endpoint = "https://stream.twitter.com/1.1/statuses/filter.json"
response, data = client.request(stream_endpoint,"POST", body=terms, headers={'Content-Type':'application/json'})
The issue I run into is this always returns the following message:
>>>'No filter parameters found. Expect at least one parameter: follow track locations\r\n'
I think your error is because you are sending JSON data in
terms = json.dumps({'track' : 'twitter'})
The statuses/filter endpoint expects an ordinary form-encoded POST body, so you should write it like this:
terms = 'track=twitter'
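A minimal sketch of the corrected request, reusing client and stream_endpoint from the question (note the Content-Type header should match the form-encoded body):
terms = 'track=twitter'  # form-encoded body instead of JSON
response, data = client.request(
    stream_endpoint, "POST", body=terms,
    headers={'Content-Type': 'application/x-www-form-urlencoded'})
Note that oauth2's Client buffers the entire response before returning, so for a long-lived stream you would eventually want an HTTP client that exposes the response incrementally.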
Alternatively, here is an example that requests a user timeline through the REST API with the same oauth2 library (request here comes from a web framework and is assumed to be available):
import sys
import urllib2
import simplejson
import oauth2 as oauth
from urlparse import parse_qsl

USER = request.params.get('username', '00000')
LIMIT = request.params.get('limit', '50')
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'

consumer_key = '424245wfdsfa4'
consumer_secret = 'afar234252523adsasd'
if consumer_key is None or consumer_secret is None:
    print 'you need consumer_key & consumer_secret key'
    sys.exit(1)

signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
oauth_client = oauth.Client(oauth_consumer)

response, content = oauth_client.request(REQUEST_TOKEN_URL, 'POST')
if response['status'] == '200':
    request_token = dict(parse_qsl(content))
else:
    print 'Invalid response from Twitter requesting token: %s' % response['status']

endpoint = 'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=' + USER + '&count=' + LIMIT
response, content = oauth_client.request(endpoint, 'GET')
url = response['content-location']
f = urllib2.urlopen(url)
response = f.read()
return simplejson.loads(response)
