Error while writing into dockerized MongoDB - python

I have run MongoDB in a Docker container with the command:
docker run -d -p 127.0.0.1:27017:27017 --name my-mongo -e MONGO_INITDB_ROOT_USERNAME=root -e MONGO_INITDB_ROOT_PASSWORD=pass mongo:4.4.0
It seems to work well.
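As a quick aside (not from the original post), one can confirm that the container is up and the port mapping is in place with:
docker ps --filter name=my-mongo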
I'm trying to connect to this MongoDB instance and perform some writes with Python libraries (I tried both synchronous and asynchronous ones). Here is an example using the motor library:
import asyncio

import motor.motor_asyncio


def initialization():
    obj_client = motor.motor_asyncio.AsyncIOMotorClient(
        host="127.0.0.1",
        password="pass",
        port=27017,
        username="root"
    )
    obj_database = obj_client["test"]
    obj_collection = obj_database["collection"]
    return obj_client, obj_database, obj_collection


async def do_insert(db, collection):
    document = {'key': 'value'}
    result = await collection.insert_one(document)
    print('result %s' % repr(result.inserted_id))


if __name__ == "__main__":
    obj_client, obj_database, obj_collection = initialization()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(do_insert(obj_database, obj_collection))
I get an error when insert_one() is called:
Traceback (most recent call last):
File "/Users/user1/Documents/python_projects/unsorted/mongo_inersection.py", line 25, in <module>
loop.run_until_complete(do_insert(obj_client, obj_database, obj_collection))
File "/usr/local/Cellar/python#3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/Users/user1/Documents/python_projects/unsorted/mongo_inersection.py", line 19, in do_insert
result = await collection.insert_one(document)
File "/usr/local/Cellar/python#3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/lib/python3.8/site-packages/pymongo/collection.py", line 698, in insert_one
self._insert(document,
File "/usr/local/lib/python3.8/site-packages/pymongo/collection.py", line 613, in _insert
return self._insert_one(
File "/usr/local/lib/python3.8/site-packages/pymongo/collection.py", line 602, in _insert_one
self.__database.client._retryable_write(
File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1497, in _retryable_write
with self._tmp_session(session) as s:
File "/usr/local/Cellar/python#3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/contextlib.py", line 113, in __enter__
return next(self.gen)
File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1829, in _tmp_session
s = self._ensure_session(session)
File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1816, in _ensure_session
return self.__start_session(True, causal_consistency=False)
File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1766, in __start_session
server_session = self._get_server_session()
File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1802, in _get_server_session
return self._topology.get_server_session()
File "/usr/local/lib/python3.8/site-packages/pymongo/topology.py", line 485, in get_server_session
self._select_servers_loop(
File "/usr/local/lib/python3.8/site-packages/pymongo/topology.py", line 215, in _select_servers_loop
raise ServerSelectionTimeoutError(
pymongo.errors.ServerSelectionTimeoutError: 127.0.0.1:27017: [Errno 61] Connection refused, Timeout: 30s, Topology Description: <TopologyDescription id: 5f67d9e04b411f070c264a7d, topology_type: Single, servers: [<ServerDescription ('127.0.0.1', 27017) server_type: Unknown, rtt: None, error=AutoReconnect('127.0.0.1:27017: [Errno 61] Connection refused')>]>
[Finished in 31.0s with exit code 1]
[cmd: ['/usr/local/bin/python3', '-u', '/Users/user1/Documents/python_projects/unsorted/mongo_inersection.py']]
[dir: /Users/user1/Documents/python_projects/unsorted]
[path: /usr/bin:/bin:/usr/sbin:/sbin]

You can check whether the IP address of the Docker container running MongoDB is the one (the host) you are using to connect to MongoDB:
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' my-mongo

The problem is that I run Docker on macOS via docker-machine (I had forgotten about it):
$ docker-machine ls
NAME      ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER     ERRORS
default   *        virtualbox   Running   tcp://192.168.99.100:2376           v19.03.5
I used 192.168.99.100:27017 to connect to MongoDB and the problem was resolved. Thanks a lot!
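For reference, a minimal sketch of the adjusted connection, assuming the docker-machine VM's IP is the 192.168.99.100 shown above (it can also be obtained with docker-machine ip default):

import motor.motor_asyncio

# Connect via the docker-machine VM's IP instead of 127.0.0.1,
# since on macOS with docker-machine the containers run inside the VirtualBox VM.
obj_client = motor.motor_asyncio.AsyncIOMotorClient(
    host="192.168.99.100",
    port=27017,
    username="root",
    password="pass"
)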

Related

Can't connect to Cassandra using python but can through cqlsh

I'm trying to test my pod connection to Cassandra using python. This is the code that I have:
host = '<hostname>'
port_num = 9092
default_fetch_size = 5000
idle_heartbeat_interval = 30
idle_heartbeat_timeout = 30
connect_timeout = 10
executor_threads = 2
protocol_version = 4
db_keyspace = 'fraud'
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster

auth_provider = PlainTextAuthProvider(username=username, password=password)
cluster = Cluster([host], port=port_num, auth_provider=auth_provider,
                  idle_heartbeat_interval=idle_heartbeat_interval,
                  idle_heartbeat_timeout=idle_heartbeat_timeout,
                  connect_timeout=connect_timeout,
                  protocol_version=protocol_version,
                  executor_threads=executor_threads)
session = cluster.connect()
And I get the error:
Traceback (most recent call last):
File "run.py", line 3, in <module>
CassProvider().test_cassandra()
File "/app/tmp_cassandra_connection.py", line 59, in test_cassandra
cassandra_connector = CassandraDbConnector().get_session()
File "/usr/local/lib/python3.6/site-packages/project/src/objects/type_objects.py", line 11, in __call__
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
File "/app/tmp_cassandra_connection.py", line 41, in __init__
session = cluster.connect()
File "cassandra/cluster.py", line 1664, in cassandra.cluster.Cluster.connect
File "cassandra/cluster.py", line 1700, in cassandra.cluster.Cluster.connect
File "cassandra/cluster.py", line 1687, in cassandra.cluster.Cluster.connect
File "cassandra/cluster.py", line 3485, in cassandra.cluster.ControlConnection.connect
File "cassandra/cluster.py", line 3530, in cassandra.cluster.ControlConnection._reconnect_internal
cassandra.cluster.NoHostAvailable: ('Unable to connect to any servers', {'<ip>:9092': OSError(None, "Tried connecting to [('<ip>', 9092)]. Last error: timed out")
However, when I get onto the same pod using bash, I can connect to Cassandra with cqlsh without a problem, using the same host, the default port, username, and password.
Do you know why the Python code on the pod gives me problems while cqlsh works perfectly?
Thanks
'Unable to connect to any servers', {'<ip>:9092':
Your code sets port_num = 9092 (the default Kafka port), but Cassandra's CQL native transport listens on 9042 by default; cqlsh uses that default, which is why it works while the driver times out. Try setting the port to 9042:
port_num = 9042
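A minimal sketch of the corrected connection, keeping the question's variable names and changing only the port:

from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster

auth_provider = PlainTextAuthProvider(username=username, password=password)
cluster = Cluster([host], port=9042, auth_provider=auth_provider)  # 9042 is the CQL default
session = cluster.connect()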

Gunicorn + eventlet use redis connection in SIGTERM signal handler

I'm facing an issue with using an active I/O connection in the SIGTERM handler when running the gunicorn eventlet server.
server.py
import signal

def exit_with_grace(*args):
    conn = get_redis_connection()  # app-specific helper
    conn.set('exited_gracefully', True)

signal.signal(signal.SIGTERM, exit_with_grace)
I also tried firing up a celery task (using the amqp broker), but all my ideas failed. When I start the server in debug mode with python server.py, it works perfectly. Gunicorn + eventlet does not allow connecting to redis in the SIGTERM handler, resulting in the following error:
Traceback (most recent call last):
File "/project/handlers/socketio/redis_context_backend.py", line 256, in publish_pattern
return conn.publish(pattern, serialized)
File "/project/venv/lib/python3.6/site-packages/redis/client.py", line 3098, in publish
return self.execute_command('PUBLISH', channel, message)
File "/project/venv/lib/python3.6/site-packages/redis/client.py", line 898, in execute_command
conn = self.connection or pool.get_connection(command_name, **options)
File "/project/venv/lib/python3.6/site-packages/redis/connection.py", line 1192, in get_connection
connection.connect()
File "/project/venv/lib/python3.6/site-packages/redis/connection.py", line 559, in connect
sock = self._connect()
File "/project/venv/lib/python3.6/site-packages/redis/connection.py", line 603, in _connect
sock.connect(socket_address)
File "/project/venv/lib/python3.6/site-packages/eventlet/greenio/base.py", line 250, in connect
self._trampoline(fd, write=True)
File "/project/venv/lib/python3.6/site-packages/eventlet/greenio/base.py", line 210, in _trampoline
mark_as_closed=self._mark_as_closed)
File "/project/venv/lib/python3.6/site-packages/eventlet/hubs/__init__.py", line 142, in trampoline
assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'
Gunicorn command:
gunicorn --worker-class eventlet -w 1 server:ws --reload -b localhost:5001
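One possible workaround (a sketch, not from the original post): keep the signal handler free of blocking I/O and let an ordinary green thread perform the Redis write, since the handler may run inside the hub greenlet, where blocking calls trip the assertion above. get_redis_connection is the question's app-specific helper.

import signal

import eventlet
from eventlet.event import Event

shutdown = Event()

def exit_with_grace(*args):
    # No blocking I/O here: the handler may execute in the hub greenlet.
    if not shutdown.ready():
        shutdown.send()

def shutdown_watcher():
    # Runs as a regular green thread, where blocking calls are allowed.
    shutdown.wait()
    conn = get_redis_connection()  # app-specific helper from the question
    conn.set('exited_gracefully', True)

signal.signal(signal.SIGTERM, exit_with_grace)
eventlet.spawn(shutdown_watcher)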

Python package APScheduler throws an error when starting the scheduler if MongoDB is used as a jobstore

I'm trying to run some code with APScheduler using MongoDB as a jobstore. I've installed pymongo and tested it, so it does work. APScheduler works when I instantiate the scheduler, but when I run scheduler.start() it throws the following error:
Traceback (most recent call last):
File "aps_ro.py", line 56, in <module>
scheduler.start()
File "/usr/lib/python3.8/site-packages/apscheduler/schedulers/background.py", line 33, in start
BaseScheduler.start(self, *args, **kwargs)
File "/usr/lib/python3.8/site-packages/apscheduler/schedulers/base.py", line 158, in start
store.start(self, alias)
File "/usr/lib/python3.8/site-packages/apscheduler/jobstores/mongodb.py", line 57, in start
self.collection.ensure_index('next_run_time', sparse=True)
File "/usr/lib/python3.8/site-packages/pymongo/collection.py", line 2028, in ensure_index
self.__create_index(keys, kwargs, session=None)
File "/usr/lib/python3.8/site-packages/pymongo/collection.py", line 1881, in __create_index
with self._socket_for_writes(session) as sock_info:
File "/usr/lib/python3.8/site-packages/pymongo/collection.py", line 195, in _socket_for_writes
return self.__database.client._socket_for_writes(session)
File "/usr/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1266, in _socket_for_writes
server = self._select_server(writable_server_selector, session)
File "/usr/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1253, in _select_server
server = topology.select_server(server_selector)
File "/usr/lib/python3.8/site-packages/pymongo/topology.py", line 233, in select_server
return random.choice(self.select_servers(selector,
File "/usr/lib/python3.8/site-packages/pymongo/topology.py", line 192, in select_servers
server_descriptions = self._select_servers_loop(
File "/usr/lib/python3.8/site-packages/pymongo/topology.py", line 208, in _select_servers_loop
raise ServerSelectionTimeoutError(
pymongo.errors.ServerSelectionTimeoutError: localhost:27017: [Errno 111] Connection refused
My actual code sample is as follows:
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from pytz import utc

jobstores = {
    'mongo': {'type': 'mongodb'},
    'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
}
executors = {...}
job_defaults = {...}
scheduler = BackgroundScheduler()
scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

@scheduler.scheduled_job(trigger='cron', day_of_week=4, hour=22, minute=21, second=0)
def tester():
    print("hello")

scheduler.start()
By default, the MongoDB Python driver connects to a database on host localhost and port 27017.
So you will need to either:
- make sure the mongodb server is running on the same machine as the Python code, on the default port (you can test this easily by typing mongo at your favourite shell and checking that you get a > prompt and not an error), or
- supply a connection string or connection arguments if mongodb is running on a different server or a different port (see the sketch below).
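For the second option, APScheduler's MongoDBJobStore passes extra keyword arguments straight through to pymongo's MongoClient, so connection arguments can be supplied when building the jobstore. A minimal sketch (the host name is a placeholder):

from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

jobstores = {
    # extra kwargs are forwarded to pymongo.MongoClient
    'mongo': MongoDBJobStore(host='mongo.example.com', port=27017),
    'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
}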

python eventlet fabric gives IOError: [Errno 1] Operation not permitted when running celery worker

I have a celery worker with a task that runs the puppet agent on a remote machine, which is a blocking call. I also have other HTTP API flows in other tasks that can benefit from eventlet:
@app.task(soft_time_limit=600)
def run_puppet_agent(hostname):
    try:
        env.host_string = self.host
        env.user = self.username
        env.password = self.password
        return sudo('puppet agent -t')
    except SoftTimeLimitExceeded:
        raise Exception('Puppet agent TIMED OUT AFTER 600 seconds')
    except Exception as e:
        # run_puppet_agent.retry(args=[hostname], countdown=20)
        LOG.info('')
        raise Exception('Puppet agent failed with error message %s' % e.message)
When I run the worker as
celery multi start 2 -A phantom.celery.manage -P eventlet -c 15 --loglevel=INFO
It gives me an exception trace like follows:
Traceback (most recent call last):
File "/home/uruddarraju/Phantom/phantom/tasks/fabric_tasks.py", line 19, in run_puppet_agent
return sudo(command, quiet=False)
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/fabric/network.py", line 639, in host_prompting_wrapper
return func(*args, **kwargs)
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/fabric/operations.py", line 1095, in sudo
stderr=stderr, timeout=timeout, shell_escape=shell_escape,
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/fabric/operations.py", line 911, in _run_command
stderr=stderr, timeout=timeout)
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/fabric/operations.py", line 795, in _execute
worker.raise_if_needed()
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/fabric/thread_handling.py", line 12, in wrapper
callable(*args, **kwargs)
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/fabric/io.py", line 231, in input_loop
r, w, x = select([sys.stdin], [], [], 0.0)
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/eventlet/green/select.py", line 79, in select
listeners.append(hub.add(hub.READ, k, on_read))
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/eventlet/hubs/epolls.py", line 52, in add
self.register(fileno, new=True)
File "/home/uruddarraju/Phantom/tools/virtualenv/local/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 45, in register
self.poll.register(fileno, mask)
IOError: [Errno 1] Operation not permitted
I have read Why does select.select() work with disk files but not epoll()? and see that there is a problem getting eventlet to work with fabric or similar blocking calls. Is there a way I can tell celery not to monkey-patch this particular task?
This error was mitigated when I used gevent instead. I hope it gives me the same performance.
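For reference, the same worker invocation with the pool swapped to gevent (assuming the gevent package is installed in the virtualenv) would be:
celery multi start 2 -A phantom.celery.manage -P gevent -c 15 --loglevel=INFO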

python paramiko module error with callback

I'm trying to use the paramiko module to copy a (big) file over my local network and use the transfer progress to update a GtkProgressBar.
A part of my code is:
...
NetworkCopy.pbar.set_text("Copy of the file in the Pi...")
while gtk.events_pending():  # refresh the progress bar
    gtk.main_iteration()
self.connection(transferred, toBeTransferred)

def connection(self, transferred, toBeTransferred):
    sftp = self.sftp
    fichier_pc = self.fichier_pc
    chemin_pi = self.chemin_pi  # variable names are in French!
    fichier = self.fichier
    transferred = self.transferred
    toBeTransferred = self.toBeTransferred
    print "Transferred: {0}\tStill to send: {1}".format(transferred, toBeTransferred)
    sftp.put(fichier_pc, chemin_pi + fichier, callback=self.connection)
In the terminal, I can see
Transferred: 0 Still to send: 3762398252
for a while, but after 10s I have this error:
File "network_copier.py", line 158, in connection
sftp.put(fichier_pc, chemin_pi + fichier, callback=self.connection)
File "/usr/lib/python2.7/dist-packages/paramiko/sftp_client.py", line 615, in put
return self.putfo(fl, remotepath, os.stat(localpath).st_size, callback, confirm)
File "/usr/lib/python2.7/dist-packages/paramiko/sftp_client.py", line 577, in putfo
fr.close()
File "/usr/lib/python2.7/dist-packages/paramiko/sftp_file.py", line 67, in close
self._close(async=False)
File "/usr/lib/python2.7/dist-packages/paramiko/sftp_file.py", line 88, in _close
self.sftp._request(CMD_CLOSE, self.handle)
File "/usr/lib/python2.7/dist-packages/paramiko/sftp_client.py", line 689, in _request
return self._read_response(num)
File "/usr/lib/python2.7/dist-packages/paramiko/sftp_client.py", line 721, in _read_response
raise SSHException('Server connection dropped: %s' % (str(e),))
paramiko.SSHException: Server connection dropped:
I have version 1.12.2 of paramiko, from this ppa.
Thanks for your help
Edit: The solution is to use pexpect instead of paramiko. It works with big files.
See here
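For illustration, a minimal pexpect-based sketch of copying a big file with scp (not from the original post; host, user, password, and paths are placeholders):

import pexpect

# Drive scp interactively and wait for the transfer to finish.
child = pexpect.spawn('scp /home/user/big_file pi@192.168.1.10:/home/pi/')
child.expect('password:')
child.sendline('raspberry')  # placeholder password
child.expect(pexpect.EOF, timeout=None)  # no timeout: the file is big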
