I want to run an instance of Quagga on each of my hosts in a Mininet setting. As implemented in the code below, I am able to mount /tmp/<host>/etc/quagga as /etc/quagga for each host, isolating the configuration files in per-host private directories. But when I start the Quagga service on each host (last lines in the ipconf file below), they all report the same PID, effectively sharing a single process, even though each host has its own Quagga configuration file.
I want separate Quagga instances, each with its own PID. How can I achieve this?
Custom topology file my_topo.py:
from mininet.topo import Topo
class my_topo(Topo):
    "My custom topology settings"

    def __init__(self, enable_all=True):
        "Create custom topo."
        Topo.__init__(self)

        private_dirs = [("/etc/quagga", "/tmp/%(name)s/etc/quagga")]

        h1 = self.addHost("h1",
                          ip="172.31.1.100/24",
                          privateDirs=private_dirs)
        h2 = self.addHost("h2",
                          ip="172.31.2.100/24",
                          privateDirs=private_dirs)
        h3 = self.addHost("h3",
                          ip="172.31.3.100/24",
                          privateDirs=private_dirs)
        h4 = self.addHost("h4",
                          ip="172.31.4.100/24",
                          privateDirs=private_dirs)
        h5 = self.addHost("h5",
                          ip="172.32.1.2/30",
                          privateDirs=private_dirs)

        sA = self.addSwitch("s5")
        sB = self.addSwitch("s6")
        sC = self.addSwitch("s7")
        sD = self.addSwitch("s8")

        self.addLink(h1, sA)
        self.addLink(h2, sB)
        self.addLink(h3, sC)
        self.addLink(h4, sD)
        self.addLink(sA, sB)
        self.addLink(sB, sD)
        self.addLink(sD, sC)
        self.addLink(sC, sA)
        self.addLink(sA, sD)
        self.addLink(h2, h5, 1, 0)
        self.addLink(h4, h5, 1, 1)
topos = { "my_topo": ( lambda: my_topo() ) }
Commands file ipconf:
h1 /etc/init.d/quagga restart
h2 /etc/init.d/quagga restart
h3 /etc/init.d/quagga restart
h4 /etc/init.d/quagga restart
h5 /etc/init.d/quagga restart
Command to run Mininet:
sudo mn --custom mininet/custom/my_topo.py --topo=my_topo --controller=remote,ip=192.168.56.101,port=6633 --pre=ipconf
I figured out myself how to isolate the processes of each host using MiniNExT, an extension of Mininet that provides greater isolation between hosts. Since MiniNExT is not compatible with recent versions of Mininet, I had to downgrade the latter to version 2.1.0, as instructed in the MiniNExT repository. Now I can run a distinct Quagga instance on each host.
Here is the adapted topology code using the MiniNExT library, in case anyone faces the same situation:
import inspect
import os
from mininext.topo import Topo
from mininext.services.quagga import QuaggaService
from collections import namedtuple
QuaggaHost = namedtuple('QuaggaHost', 'name ip lo gw')
class my_topo(Topo):
    'My custom topology settings'

    def __init__(self):
        Topo.__init__(self)

        self_path = os.path.dirname(os.path.abspath(
            inspect.getfile(inspect.currentframe())))

        quagga_svc = QuaggaService(autoStop=False)
        quagga_base_config_path = self_path + '/configs/'

        quagga_hosts = []
        quagga_hosts.append(QuaggaHost(name='h1',
                                       ip='172.31.1.100/24',
                                       lo='10.0.1.1/24',
                                       gw='gw 172.31.1.1'))
        quagga_hosts.append(QuaggaHost(name='h2',
                                       ip='172.31.2.100/24',
                                       lo='10.0.2.1/24',
                                       gw='gw 172.31.2.1'))
        quagga_hosts.append(QuaggaHost(name='h3',
                                       ip='172.31.3.100/24',
                                       lo='10.0.3.1/24',
                                       gw='gw 172.31.3.1'))
        quagga_hosts.append(QuaggaHost(name='h4',
                                       ip='172.31.4.100/24',
                                       lo='10.0.4.1/24',
                                       gw='gw 172.31.4.1'))
        quagga_hosts.append(QuaggaHost(name='h5',
                                       ip='172.32.1.2/30',
                                       lo='10.0.5.1/24',
                                       gw='gw 172.32.1.1'))

        hosts = {}
        for host in quagga_hosts:
            quagga_container = self.addHost(name=host.name,
                                            ip=host.ip,
                                            defaultRoute=host.gw,
                                            hostname=host.name,
                                            privateLogDir=True,
                                            privateRunDir=True,
                                            inMountNamespace=True,
                                            inPIDNamespace=True,
                                            inUTSNamespace=True)
            hosts[host.name] = quagga_container

            self.addNodeLoopbackIntf(node=host.name, ip=host.lo)

            quagga_svc_config = \
                {'quaggaConfigPath': quagga_base_config_path + host.name}
            self.addNodeService(node=host.name, service=quagga_svc,
                                nodeConfig=quagga_svc_config)

        sA = self.addSwitch('s5')
        sB = self.addSwitch('s6')
        sC = self.addSwitch('s7')
        sD = self.addSwitch('s8')

        self.addLink(hosts['h1'], sA)
        self.addLink(hosts['h2'], sB)
        self.addLink(hosts['h3'], sC)
        self.addLink(hosts['h4'], sD)
        self.addLink(sA, sB)
        self.addLink(sB, sD)
        self.addLink(sD, sC)
        self.addLink(sC, sA)
        self.addLink(sA, sD)
        self.addLink(hosts['h2'], hosts['h5'], 1, 0)
        self.addLink(hosts['h4'], hosts['h5'], 1, 1)
topos = {'my_topo': (lambda: my_topo())}
Quagga configuration files must be placed inside the configs directory, which lives in the same directory as the topology file. configs contains one subdirectory per host, each acting as that host's /etc/quagga directory.
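For example, with zebra and ospfd running on every host, the layout could look like this (the file names are illustrative and depend on which Quagga daemons you configure):
configs/
    h1/
        zebra.conf
        ospfd.conf
    h2/
        zebra.conf
        ospfd.conf
    ...
    h5/
        zebra.conf
        ospfd.conf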
Related
I have this code that I am trying to use to connect three routers r1, r2, r3 together. I think I have to use switches to connect host nodes to the routers, so each router is connected to a switch that is connected to a host. I have it working for two routers, but I can't get it to work for three routers.
I am using Mininet to run the Python script. Is there a way to add the IP addresses to the routing tables for the hosts? I am new to Mininet, so I am not familiar with connecting routers. I have to do this for seven routers, not just three, but I am starting with three for now. Is there a better way to do this?
#!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node
from mininet.log import setLogLevel, info
from mininet.cli import CLI
class LinuxRouter(Node):
    def config(self, **params):
        super(LinuxRouter, self).config(**params)
        self.cmd('sysctl net.ipv4.ip_forward=1')

    def terminate(self):
        self.cmd('sysctl net.ipv4.ip_forward=0')
        super(LinuxRouter, self).terminate()

class NetworkTopo(Topo):
    def build(self, **_opts):
        # Add 3 routers in three different subnets
        r1 = self.addHost('r1', cls=LinuxRouter, ip='10.0.0.1/24')
        r2 = self.addHost('r2', cls=LinuxRouter, ip='10.1.0.1/24')
        r3 = self.addHost('r3', cls=LinuxRouter, ip='10.2.0.1/24')

        # Add 3 switches
        s1 = self.addSwitch('s1')
        s2 = self.addSwitch('s2')
        s3 = self.addSwitch('s3')

        # Add router-switch links in the same subnet
        self.addLink(s1,
                     r1,
                     intfName2='r1-eth1',
                     params2={'ip': '10.0.0.1/24'})
        self.addLink(s2,
                     r2,
                     intfName2='r2-eth1',
                     params2={'ip': '10.1.0.1/24'})
        self.addLink(s3,
                     r3,
                     intfName2='r3-eth1',
                     params2={'ip': '10.2.0.1/24'})

        # Add router-router links in new subnets for the router-router connections
        self.addLink(r1,
                     r2,
                     intfName1='r1-eth2',
                     intfName2='r2-eth2',
                     params1={'ip': '10.100.0.1/24'},
                     params2={'ip': '10.100.0.2/24'})
        self.addLink(r1,
                     r2,
                     intfName1='r1-eth3',
                     intfName2='r2-eth3',
                     params1={'ip': '10.101.0.1/24'},
                     params2={'ip': '10.101.0.2/24'})

        # Add hosts, specifying the default route
        d1 = self.addHost(name='d1',
                          ip='10.0.0.251/24',
                          defaultRoute='via 10.0.0.1')
        d2 = self.addHost(name='d2',
                          ip='10.1.0.252/24',
                          defaultRoute='via 10.1.0.1')
        d3 = self.addHost(name='d3',
                          ip='10.1.0.253/24',
                          defaultRoute='via 10.1.0.1')

        # Add host-switch links
        self.addLink(d1, s1)
        self.addLink(d2, s2)
        self.addLink(d3, s2)

def run():
    topo = NetworkTopo()
    net = Mininet(topo=topo)
    # Add routing for reaching networks that aren't directly connected
    print info(net['r1'].cmd("ip route add 10.1.0.0/24 via 10.100.0.2 dev r1-eth2"))
    print info(net['r2'].cmd("ip route add 10.0.0.0/24 via 10.100.0.1 dev r2-eth2"))
    net.start()
    CLI(net)
    net.stop()

if __name__ == '__main__':
    setLogLevel('info')
    run()
Router 3 is not working because you have not connected it! So instead of
self.addLink(r1,
             r2,
             intfName1='r1-eth3',
             intfName2='r2-eth3',
             params1={'ip': '10.101.0.1/24'},
             params2={'ip': '10.101.0.2/24'})
Use
self.addLink(r1,
             r3,
             intfName1='r1-eth0',
             intfName2='r3-eth0',
             params1={'ip': '10.101.0.1/24'},
             params2={'ip': '10.101.0.2/24'})
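With that link in place, r1 and r3 still need routes to each other's LAN subnets, mirroring the existing route commands in run(). A minimal sketch, assuming the interface names and 10.101.0.x addresses used above:
# Inside run(), next to the existing ip route commands
print info(net['r1'].cmd("ip route add 10.2.0.0/24 via 10.101.0.2 dev r1-eth0"))
print info(net['r3'].cmd("ip route add 10.0.0.0/24 via 10.101.0.1 dev r3-eth0"))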
Hello everybody, hope you are all doing well.
I am working on a project in which I receive GPS data (longitude and latitude) from an Android device via an SQL server. What I am trying to do is send this longitude/latitude data to my SITL vehicle in ArduPilot. I thought about using the DroneKit Python API as such:
from dronekit import connect, VehicleMode
import time
import mysql.connector
#--- Start the Software In The Loop (SITL)
import dronekit_sitl
#
sitl = dronekit_sitl.start_default() #(sitl.start)
#connection_string = sitl.connection_string()
mydb = mysql.connector.connect(
    host="******",
    user="******",
    password="*****",
    database="koordinat"
)
mycursor = mydb.cursor()
#--- Now that we have started the SITL and we have the connection string (basically the ip and udp port)...
print("Araca bağlanılıyor")
vehicle = connect('tcp:127.0.0.1:5762', wait_ready=False, baud = 115200)
vehicle.wait_ready(True, raise_exception=False)
#-- Read information from the autopilot:
#- Version and attributes
vehicle.wait_ready('autopilot_version')
print('Autopilot version: %s'%vehicle.version)
#- Does the firmware support the companion pc to set the attitude?
print('Supports set attitude from companion: %s'%vehicle.capabilities.set_attitude_target_local_ned)
vehicle.mode = VehicleMode("GUIDED")
vehicle.armed = True
while(True):
    mycursor.execute("SELECT * FROM koordinat WHERE 1")
    location = str(mycursor.fetchall())
    location = location.split(",")
    location[0] = location[0].replace("[", "")
    location[0] = location[0].replace("(", "")
    location[0] = location[0].replace("'", "")
    location[1] = location[1].replace("[", "")
    location[1] = location[1].replace(")", "")
    location[1] = location[1].replace("'", "")
    location[1] = location[1].replace(")", "")
    # Converting the longitude and latitude to float, before assigning to the vehicle GPS data:
    location[0] = float(location[0])
    location[1] = float(location[1])
    # Setting the location of the vehicle:
    vehicle.location.global_frame.lat = location[0]
    vehicle.location.global_frame.lon = location[1]
    print('Location:', str(vehicle.location.global_frame.lat)+str(","), str(vehicle.location.global_frame.lon)+str(","), str(vehicle.location.global_frame.alt))
    #- When did we receive the last heartbeat
    print('Last heartbeat: %s'%vehicle.last_heartbeat)
    time.sleep(1)
However, when I check the SITL and Mission Planner (and also the print statements in my code), the location does not change; the simulator simply ignores the commands sent by DroneKit. Is there a working method to accomplish what I am trying to do? I tried changing the sim_vehicle.py script which I use to start the simulation, but I was only able to change the starting/home location of the vehicle, not its current location in SITL and Mission Planner.
This is incorrect. You're modifying the attribute of the vehicle object that's connected to the SITL, not sending any commands to the actual autopilot.
vehicle.location.global_frame.lat = location[0]
vehicle.location.global_frame.lon = location[1]
What you want to do is set the mode to GUIDED and use the simple_goto function in dronekit to make the drone move to lat/lon/alt coordinates.
Otherwise, you can also send this MAVLink command SET_POSITION_TARGET_GLOBAL_INT to guide it.
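For example, a minimal sketch of the GUIDED/simple_goto approach, reusing the vehicle object and the location values read from the database in the loop above (the 10 m target altitude is just an illustrative value):
from dronekit import VehicleMode, LocationGlobalRelative

vehicle.mode = VehicleMode("GUIDED")

# Command the autopilot to fly to the lat/lon read from the database,
# at 10 m above the home position (illustrative altitude)
target = LocationGlobalRelative(location[0], location[1], 10)
vehicle.simple_goto(target)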
I have these simple master/slave scripts, using locustio==0.13.5. This is the master:
#!/usr/bin/env python3
import logging
import argparse
import os
import sys
import time
import urllib3
import locust
import utils
class TestSomething(locust.TaskSet):
    @locust.task(1)
    def get_hosts_small(self):
        print(self.locust.message)
        return self.client.get(url='http://localhost', verify=False)

class TheSomething(locust.HttpLocust):
    task_set = TestSomething
    wait_time = locust.constant(0)
urllib3.disable_warnings()
logging.basicConfig(level=logging.DEBUG)
options = argparse.Namespace()
options.host = "http://localhost"
options.num_clients = 1
options.hatch_rate = options.num_clients
options.num_requests = 10
options.stop_timeout = 1
options.step_load = False
options.reset_stats = False
options.test_duration = 3
options.master_host = 'localhost'
options.master_port = 5557
options.master_bind_host = '*'
options.master_bind_port = 5557
options.heartbeat_liveness = 3
options.heartbeat_interval = 1
options.expect_slaves = 1
test_set = TheSomething
test_set.message = 'Hello'
locust_runner = locust.runners.MasterLocustRunner([test_set], options)
while len(locust_runner.clients.ready) < options.expect_slaves:
    logging.info("Waiting for slaves to be ready, %s of %s connected",
                 len(locust_runner.clients.ready), options.expect_slaves)
    time.sleep(1)
locust_runner.start_hatching(locust_count=options.num_clients, hatch_rate=options.hatch_rate)
time.sleep(options.test_duration)
locust_runner.quit()
locust.events.quitting.fire(reverse=True)
print(locust_runner.stats) # actually using custom function to format results
and this is the slave:
#!/usr/bin/env python3
import logging
import argparse
import os
import sys
import time
import locust
class TestSomething(locust.TaskSet):
    @locust.task(1)
    def get_hosts_small(self):
        print(self.locust.message)
        return self.client.get(url='http://localhost', verify=False)

class TheSomething(locust.HttpLocust):
    task_set = TestSomething
    wait_time = locust.constant(0)
logging.basicConfig(level=logging.DEBUG)
options = argparse.Namespace()
options.host = "http://localhost"
options.num_clients = 1
options.hatch_rate = options.num_clients
options.num_requests = 10
options.stop_timeout = 1
options.step_load = False
options.reset_stats = False
options.test_duration = 3
options.master_host = 'localhost'
options.master_port = 5557
options.master_bind_host = '*'
options.master_bind_port = 5557
options.heartbeat_liveness = 3
options.heartbeat_interval = 1
test_set = TheSomething
test_set.message = 'Hello'
locust_runner = locust.runners.SlaveLocustRunner([test_set], options)
locust_runner.worker()
When I start the master and the slave, I can see the master wait for a slave to come up, then the slave executing the test, and the report printed by the master before it finishes. But the slave does not finish; it hangs, doing nothing (I assume).
I would like the slave either to exit, or to restart and attempt to connect to the master again in case I just rerun the master script. Does anybody have an idea how to do that, please?
I usually just set any parameters as environment variables and read them from the script (os.environ['MY_ENV_VAR']).
If you're running the slaves on the same server, that should be easy (just run export MY_ENV_VAR=Hello before starting the processes). If you are running slaves on different machines, it is a little more complicated, but check out locust-swarm, which does that work for you (https://github.com/SvenskaSpel/locust-swarm).
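A minimal sketch of that idea, assuming the variable is called MY_ENV_VAR and reusing test_set from the slave script above:
import os

# Read the parameter from the environment, with a fallback default
test_set.message = os.environ.get('MY_ENV_VAR', 'Hello')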
As for the "do stuff after the test" there is a "quitting" event that you can subscribe to:
https://docs.locust.io/en/0.14.5/api.html#available-hooks
Or, for the upcoming 1.0 version:
https://docs.locust.io/en/latest/api.html#locust.event.Events.quitting
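A minimal sketch of subscribing to that hook with the pre-1.0 API used here (events are EventHook objects you extend with +=); the handler name and its body are just placeholders:
import locust

def on_quitting(**kwargs):
    # Runs when the runner shuts down, e.g. flush results or force an exit
    print("Locust is quitting")

locust.events.quitting += on_quitting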
I use a template of a Python script (running on a Raspberry Pi) to send sensor data (I2C) via WiFi to my PC. The problem is that the values are not static, yet when I start the web application it reads the data from the sensor only once. So, if I check the values from my PC, I can see it sent the data correctly, but they never change.
How can I modify the script so it refreshes the i2c_output value without starting the script over and over again?
Here is what I have tried so far:
import web
import sys, os
import smbus
import math
#
# Lot of initialisation... forget that part
#
accel_xout = read_word_2c(0x3b)
accel_yout = read_word_2c(0x3d)
accel_zout = read_word_2c(0x3f)
afs_sel = read_word_2c(0x28)
LSB_afs_sel = 16384.0
accel_xout_sc = accel_xout / LSB_afs_sel
accel_yout_sc = accel_yout / LSB_afs_sel
accel_zout_sc = accel_zout / LSB_afs_sel
i2c_output = str(accel_xout_sc) + str(accel_yout_sc) + str(accel_zout_sc)
urls = ( '/','Index',
)
class Index:
    def GET(self):
        return i2c_output

if __name__ == "__main__":
    app = web.application(urls, globals())
    app.run()
Move the code that retrieves the sensor data into a method and invoke that method each time the index is called.
def get_sensor_output():
    #
    # Lot of initialisation... forget that part
    #
    accel_xout = read_word_2c(0x3b)
    accel_yout = read_word_2c(0x3d)
    accel_zout = read_word_2c(0x3f)

    afs_sel = read_word_2c(0x28)
    LSB_afs_sel = 16384.0

    accel_xout_sc = accel_xout / LSB_afs_sel
    accel_yout_sc = accel_yout / LSB_afs_sel
    accel_zout_sc = accel_zout / LSB_afs_sel

    i2c_output = str(accel_xout_sc) + str(accel_yout_sc) + str(accel_zout_sc)
    return i2c_output

class Index:
    def GET(self):
        return get_sensor_output()
Note: you may want to implement some sort of cache depending on how often this GET method is called. Currently, each call retrieves the sensor data, which may or may not be an expensive operation that drains the battery on your Pi.
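A minimal sketch of such a cache, assuming a maximum age of one second (the interval is purely illustrative):
import time

_cache = {'value': None, 'timestamp': 0.0}
CACHE_MAX_AGE = 1.0  # seconds

def get_sensor_output_cached():
    # Re-read the sensor only when the cached value is older than CACHE_MAX_AGE
    now = time.time()
    if _cache['value'] is None or now - _cache['timestamp'] > CACHE_MAX_AGE:
        _cache['value'] = get_sensor_output()
        _cache['timestamp'] = now
    return _cache['value']

class Index:
    def GET(self):
        return get_sensor_output_cached()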
How do I efficiently and correctly manage system services (processes) with Python? I want to run commands like:
/etc/init.d/daemon stop
service daemon start
systemctl restart daemon
Is there a Python module available for this?
Any help would be highly appreciated.
I found a way using the systemd D-Bus interface. Here is the code:
import dbus
import subprocess
import os
import sys
import time
SYSTEMD_BUSNAME = 'org.freedesktop.systemd1'
SYSTEMD_PATH = '/org/freedesktop/systemd1'
SYSTEMD_MANAGER_INTERFACE = 'org.freedesktop.systemd1.Manager'
SYSTEMD_UNIT_INTERFACE = 'org.freedesktop.systemd1.Unit'
bus = dbus.SystemBus()
proxy = bus.get_object('org.freedesktop.PolicyKit1', '/org/freedesktop/PolicyKit1/Authority')
authority = dbus.Interface(proxy, dbus_interface='org.freedesktop.PolicyKit1.Authority')
system_bus_name = bus.get_unique_name()
subject = ('system-bus-name', {'name' : system_bus_name})
action_id = 'org.freedesktop.systemd1.manage-units'
details = {}
flags = 1 # AllowUserInteraction flag
cancellation_id = '' # No cancellation id
result = authority.CheckAuthorization(subject, action_id, details, flags, cancellation_id)
if result[1] != 0:
    sys.exit("Need administrative privilege")
systemd_object = bus.get_object(SYSTEMD_BUSNAME, SYSTEMD_PATH)
systemd_manager = dbus.Interface(systemd_object, SYSTEMD_MANAGER_INTERFACE)
unit = systemd_manager.GetUnit('cups.service')
unit_object = bus.get_object(SYSTEMD_BUSNAME, unit)
#unit_interface = dbus.Interface(unit_object, SYSTEMD_UNIT_INTERFACE)
#unit_interface.Stop('replace')
systemd_manager.StartUnit('cups.service', 'replace')
while list(systemd_manager.ListJobs()):
    time.sleep(2)
    print 'there are pending jobs, lets wait for them to finish.'
prop_unit = dbus.Interface(unit_object, 'org.freedesktop.DBus.Properties')
active_state = prop_unit.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
sub_state = prop_unit.Get('org.freedesktop.systemd1.Unit', 'SubState')
print active_state, sub_state
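The same Manager interface also exposes StopUnit and RestartUnit, so stopping or restarting a service follows the same pattern as the StartUnit call above, for example:
# Stop or restart a unit, analogous to StartUnit above
systemd_manager.StopUnit('cups.service', 'replace')
systemd_manager.RestartUnit('cups.service', 'replace')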