Connecting with Athena using Python and pyathenajdbc - python

I am trying to connect to AWS Athena from Python using pyathenajdbc. The issue I am having is obtaining a connection: when I run the code below, I receive an error stating it cannot find the AthenaDriver (java.lang.RuntimeException: Class com.amazonaws.athena.jdbc.AthenaDriver not found). I did download this file from AWS and have confirmed it is sitting in that directory.
from mdpbi.rsi.config import *
from mdpbi.tools.functions import mdpLog
from pkg_resources import resource_string
import argparse
import os
import pyathenajdbc
import sys

SCRIPT_NAME = "Athena_Export"
ATHENA_JDBC_CLASSPATH = "/opt/amazon/athenajdbc/AthenaJDBC41-1.0.0.jar"
EXPORT_OUTFILE = "RSI_Export.txt"
EXPORT_OUTFILE_PATH = os.path.join(WORKINGDIR, EXPORT_OUTFILE)

def get_arg_parser():
    """This function returns the argument parser object to be used with this script"""
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    return parser

def main():
    args = get_arg_parser().parse_args(sys.argv[1:])
    logger = mdpLog(SCRIPT_NAME, LOGDIR)
    SQL = resource_string("mdpbi.rsi.athena.resources", "athena.sql")
    conn = pyathenajdbc.connect(
        s3_staging_dir="s3://athena",
        access_key=AWS_ACCESS_KEY_ID,
        secret_key=AWS_SECRET_ACCESS_KEY,
        region_name="us-east-1",
        log_path=LOGDIR,
        driver_path=ATHENA_JDBC_CLASSPATH
    )
    try:
        with conn.cursor() as cursor:
            cursor.execute(SQL)
            logger.info(cursor.description)
            logger.info(cursor.fetchall())
    finally:
        conn.close()
    return 0

if __name__ == '__main__':
    rtn = main()
    sys.exit(rtn)
Traceback (most recent call last):
  File "/usr/lib64/python2.7/runpy.py", line 174, in _run_module_as_main
    "__main__", fname, loader, pkg_name)
  File "/usr/lib64/python2.7/runpy.py", line 72, in _run_code
    exec code in run_globals
  File "/home/ec2-user/jason_testing/mdpbi/rsi/athena/__main__.py", line 53, in <module>
    rtn = main()
  File "/home/ec2-user/jason_testing/mdpbi/rsi/athena/__main__.py", line 39, in main
    driver_path=athena_jdbc_driver_path
  File "/opt/mdpbi/Python_Envs/2.7.10/local/lib/python2.7/dist-packages/pyathenajdbc/__init__.py", line 65, in connect
    driver_path, **kwargs)
  File "/opt/mdpbi/Python_Envs/2.7.10/local/lib/python2.7/dist-packages/pyathenajdbc/connection.py", line 68, in __init__
    jpype.JClass(ATHENA_DRIVER_CLASS_NAME)
  File "/opt/mdpbi/Python_Envs/2.7.10/lib64/python2.7/dist-packages/jpype/_jclass.py", line 55, in JClass
    raise _RUNTIMEEXCEPTION.PYEXC("Class %s not found" % name)

The JDBC driver requires Java 8, but I was running Java 7. I installed a newer version of Java on the EC2 instance:
https://tecadmin.net/install-java-8-on-centos-rhel-and-fedora/#
I also had to set the Java version in my code. With these changes, the code now runs as expected.
from mdpbi.rsi.config import *
from mdpbi.tools.functions import mdpLog
from pkg_resources import resource_string
import argparse
import os
import pyathenajdbc
import sys

SCRIPT_NAME = "Athena_Export"

def get_arg_parser():
    """This function returns the argument parser object to be used with this script"""
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    return parser

def main():
    args = get_arg_parser().parse_args(sys.argv[1:])
    logger = mdpLog(SCRIPT_NAME, LOGDIR)
    SQL = resource_string("mdpbi.rsi.athena.resources", "athena.sql")
    os.environ["JAVA_HOME"] = "/opt/jdk1.8.0_121"
    os.environ["JRE_HOME"] = "/opt/jdk1.8.0_121/jre"
    os.environ["PATH"] = "/opt/jdk1.8.0_121/bin:/opt/jdk1.8.0_121/jre/bin"
    conn = pyathenajdbc.connect(
        s3_staging_dir="s3://mdpbi.data.rsi.out/",
        access_key=AWS_ACCESS_KEY_ID,
        secret_key=AWS_SECRET_ACCESS_KEY,
        schema_name="rsi",
        region_name="us-east-1"
    )
    try:
        with conn.cursor() as cursor:
            cursor.execute(SQL)
            logger.info(cursor.description)
            logger.info(cursor.fetchall())
    finally:
        conn.close()
    return 0

if __name__ == '__main__':
    rtn = main()
    sys.exit(rtn)
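As a quick sanity check, you can confirm from Python which JVM will actually be picked up before connecting (a minimal sketch; note java -version writes to stderr, hence the redirect):

import subprocess

print subprocess.check_output(["java", "-version"], stderr=subprocess.STDOUT)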

Try this:
pyathenajdbc.ATHENA_JAR = ATHENA_JDBC_CLASSPATH
You won't need to specify the driver_path argument in the connect call.
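For illustration, a minimal sketch of a connection set up this way (jar path and credentials as in the question; AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY come from the question's config module):

import pyathenajdbc

# Point pyathenajdbc at the downloaded driver jar instead of passing driver_path.
pyathenajdbc.ATHENA_JAR = "/opt/amazon/athenajdbc/AthenaJDBC41-1.0.0.jar"

conn = pyathenajdbc.connect(
    s3_staging_dir="s3://athena",
    access_key=AWS_ACCESS_KEY_ID,
    secret_key=AWS_SECRET_ACCESS_KEY,
    region_name="us-east-1",
)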


json.decoder.JSONDecodeError when migrating to Kubeflow Pipelines v2

Copied from here: https://github.com/kubeflow/pipelines/issues/7608
I have a generated code file that runs against Kubeflow. It ran fine on Kubeflow v1, and now I'm moving it to Kubeflow v2. When I do this, I get the following error:
json.decoder.JSONDecodeError: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
I honestly don't even know where to go next. It feels like something is fundamentally broken for it to fail on the very first character, but I can't see what (it happens inside the Kubeflow execution).
Thanks!
Environment
How did you deploy Kubeflow Pipelines (KFP)? Standard deployment to AWS
KFP version: 1.8.1
KFP SDK version: 1.8.12
Here's the logs:
time="2022-04-26T17:38:09.547Z" level=info msg="capturing logs" argo=true
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead:
https://pip.pypa.io/warnings/venv
[KFP Executor 2022-04-26 17:38:24,691 INFO]: Looking for component `run_info_fn` in --component_module_path `/tmp/tmp.NJW6PWXpIt/ephemeral_component.py`
[KFP Executor 2022-04-26 17:38:24,691 INFO]: Loading KFP component "run_info_fn" from /tmp/tmp.NJW6PWXpIt/ephemeral_component.py (directory "/tmp/tmp.NJW6PWXpIt" and module name "ephemeral_component")
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "/usr/local/lib/python3.7/site-packages/kfp/v2/components/executor_main.py", line 104, in <module>
    executor_main()
  File "/usr/local/lib/python3.7/site-packages/kfp/v2/components/executor_main.py", line 94, in executor_main
    executor_input = json.loads(args.executor_input)
  File "/usr/local/lib/python3.7/json/__init__.py", line 348, in loads
    return _default_decoder.decode(s)
  File "/usr/local/lib/python3.7/json/decoder.py", line 337, in decode
    obj, end = self.raw_decode(s, idx=_w(s, 0).end())
  File "/usr/local/lib/python3.7/json/decoder.py", line 353, in raw_decode
    obj, end = self.scan_once(s, idx)
json.decoder.JSONDecodeError: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
time="2022-04-26T17:38:24.803Z" level=error msg="cannot save artifact /tmp/outputs/run_info/data" argo=true error="stat /tmp/outputs/run_info/data: no such file or directory"
Error: exit status 1
Here are the files to repro:
root_pipeline_04d99580c84b47c28405a2c8bcae8703.py
import kfp.v2.components
from kfp.v2.dsl import InputPath
from kubernetes.client.models import V1EnvVar
from kubernetes import client, config
from typing import NamedTuple
from base64 import b64encode
import kfp.v2.dsl as dsl
import kubernetes
import json
import kfp

from run_info import run_info_fn
from same_step_000_ce6494722c474dd3b8bef482bb976557 import same_step_000_ce6494722c474dd3b8bef482bb976557_fn

run_info_comp = kfp.v2.dsl.component(
    func=run_info_fn,
    packages_to_install=[
        "kfp",
        "dill",
    ],
)

same_step_000_ce6494722c474dd3b8bef482bb976557_comp = kfp.v2.dsl.component(
    func=same_step_000_ce6494722c474dd3b8bef482bb976557_fn,
    base_image="public.ecr.aws/j1r0q0g6/notebooks/notebook-servers/codeserver-python:v1.5.0",
    packages_to_install=[
        "dill",
        "requests",
        # TODO: make this a loop
    ],
)

@kfp.dsl.pipeline(name="root_pipeline_compilation",)
def root(
    context: str='', metadata_url: str='',
):
    # Generate secrets (if not already created)
    secrets_by_env = {}
    env_vars = {
    }
    run_info = run_info_comp(run_id=kfp.dsl.RUN_ID_PLACEHOLDER)
    same_step_000_ce6494722c474dd3b8bef482bb976557 = same_step_000_ce6494722c474dd3b8bef482bb976557_comp(
        input_context_path="",
        run_info=run_info.outputs["run_info"],
        metadata_url=metadata_url
    )
    same_step_000_ce6494722c474dd3b8bef482bb976557.execution_options.caching_strategy.max_cache_staleness = "P0D"
    for k in env_vars:
        same_step_000_ce6494722c474dd3b8bef482bb976557.add_env_variable(V1EnvVar(name=k, value=env_vars[k]))
run_info.py
"""
The run_info component fetches metadata about the current pipeline execution
from kubeflow and passes it on to the user code step components.
"""
from typing import NamedTuple
def run_info_fn(
run_id: str,
) -> NamedTuple("RunInfoOutput", [("run_info", str),]):
from base64 import urlsafe_b64encode
from collections import namedtuple
import datetime
import base64
import dill
import kfp
client = kfp.Client(host="http://ml-pipeline:8888")
run_info = client.get_run(run_id=run_id)
run_info_dict = {
"run_id": run_info.run.id,
"name": run_info.run.name,
"created_at": run_info.run.created_at.isoformat(),
"pipeline_id": run_info.run.pipeline_spec.pipeline_id,
}
# Track kubernetes resources associated wth the run.
for r in run_info.run.resource_references:
run_info_dict[f"{r.key.type.lower()}_id"] = r.key.id
# Base64-encoded as value is visible in kubeflow ui.
output = urlsafe_b64encode(dill.dumps(run_info_dict))
return namedtuple("RunInfoOutput", ["run_info"])(
str(output, encoding="ascii")
)
same_step_000_ce6494722c474dd3b8bef482bb976557.py
import kfp
from kfp.v2.dsl import component, Artifact, Input, InputPath, Output, OutputPath, Dataset, Model
from typing import NamedTuple

def same_step_000_ce6494722c474dd3b8bef482bb976557_fn(
    input_context_path: InputPath(str),
    output_context_path: OutputPath(str),
    run_info: str = "gAR9lC4=",
    metadata_url: str = "",
):
    from base64 import urlsafe_b64encode, urlsafe_b64decode
    from pathlib import Path
    import datetime
    import requests
    import tempfile
    import dill
    import os

    input_context = None
    with Path(input_context_path).open("rb") as reader:
        input_context = reader.read()

    # Helper function for posting metadata to mlflow.
    def post_metadata(json):
        if metadata_url == "":
            return
        try:
            req = requests.post(metadata_url, json=json)
            req.raise_for_status()
        except requests.exceptions.HTTPError as err:
            print(f"Error posting metadata: {err}")

    # Move to writable directory as user might want to do file IO.
    # TODO: won't persist across steps, might need support in SDK?
    os.chdir(tempfile.mkdtemp())

    # Load information about the current experiment run:
    run_info = dill.loads(urlsafe_b64decode(run_info))

    # Post session context to mlflow.
    if len(input_context) > 0:
        input_context_str = urlsafe_b64encode(input_context)
        post_metadata(
            {
                "experiment_id": run_info["experiment_id"],
                "run_id": run_info["run_id"],
                "step_id": "same_step_000",
                "metadata_type": "input",
                "metadata_value": input_context_str,
                "metadata_time": datetime.datetime.now().isoformat(),
            }
        )

    # User code for step, which we run in its own execution frame.
    user_code = f"""
import dill

# Load session context into global namespace:
if { len(input_context) } > 0:
    dill.load_session("{ input_context_path }")

{dill.loads(urlsafe_b64decode("gASVGAAAAAAAAACMFHByaW50KCJIZWxsbyB3b3JsZCIplC4="))}

# Remove anything from the global namespace that cannot be serialised.
# TODO: this will include things like pandas dataframes, needs sdk support?
_bad_keys = []
_all_keys = list(globals().keys())
for k in _all_keys:
    try:
        dill.dumps(globals()[k])
    except TypeError:
        _bad_keys.append(k)

for k in _bad_keys:
    del globals()[k]

# Save new session context to disk for the next component:
dill.dump_session("{output_context_path}")
"""

    # Runs the user code in a new execution frame. Context from the previous
    # component in the run is loaded into the session dynamically, and we run
    # with a single globals() namespace to simulate top-level execution.
    exec(user_code, globals(), globals())

    # Post new session context to mlflow:
    with Path(output_context_path).open("rb") as reader:
        context = urlsafe_b64encode(reader.read())

    post_metadata(
        {
            "experiment_id": run_info["experiment_id"],
            "run_id": run_info["run_id"],
            "step_id": "same_step_000",
            "metadata_type": "output",
            "metadata_value": context,
            "metadata_time": datetime.datetime.now().isoformat(),
        }
    )
Python file executed to deploy the run:
from sameproject.ops import helpers
from pathlib import Path
import importlib
import kfp

def deploy(compiled_path: Path, root_module_name: str):
    with helpers.add_path(str(compiled_path)):
        kfp_client = kfp.Client()  # only supporting 'kubeflow' namespace
        root_module = importlib.import_module(root_module_name)
        return kfp_client.create_run_from_pipeline_func(
            root_module.root,
            arguments={},
        )
Turns out it has to do with not compiling with the right execution mode. If you're getting this error, your compile call should look like this:
Compiler(mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE).compile(pipeline_func=root_module.root, package_path=str(package_yaml_path))
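For context, here is a sketch of the question's deploy helper with V2-compatible compilation wired in (package_yaml_path is a hypothetical output path; in KFP SDK 1.8.x, create_run_from_pipeline_func accepts the same mode argument):

from sameproject.ops import helpers
from pathlib import Path
import importlib
import kfp

def deploy(compiled_path: Path, root_module_name: str, package_yaml_path: Path):
    with helpers.add_path(str(compiled_path)):
        root_module = importlib.import_module(root_module_name)
        # Compile in V2-compatible mode so the executor receives well-formed
        # --executor_input JSON rather than a v1-style payload.
        kfp.compiler.Compiler(
            mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE
        ).compile(
            pipeline_func=root_module.root,
            package_path=str(package_yaml_path),
        )
        kfp_client = kfp.Client()  # only supporting 'kubeflow' namespace
        return kfp_client.create_run_from_pipeline_func(
            root_module.root,
            arguments={},
            mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE,
        )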

How to avoid ImportError in twisted tac file?

I have a twisted tac file (twisted_service.py) with the following code:
from twisted.application import service
# application.py file in the same dir
from .application import setup_reactor

class WebsocketService(service.Service):

    def startService(self):
        service.Service.startService(self)
        setup_reactor()

application = service.Application("ws")
ws_service = WebsocketService()
ws_service.setServiceParent(application)
Here is application.py file, which sets up the reactor:
# -*- coding: utf-8 -*-
from twisted.web.server import Site
from twisted.web.static import Data
from twisted.internet import reactor, defer
from autobahn.twisted.resource import WebSocketResource
from autobahn.twisted.websocket import WebSocketServerFactory
from txsni.snimap import SNIMap
from txsni.maputils import Cache
from txsni.snimap import HostDirectoryMap
from twisted.python.filepath import FilePath
from tools.database.async import pg_conn
from tools.database import makedsn
from tools.config import main_db
from tools.modules.external import flask_setup
import tools.config as config
import websockethandlers as wsh
from pytrapd import TrapsListener

PROTOCOLMAP = {
    'portcounters': wsh.PortCounters,
    'eqcounters': wsh.EquipmentCounters,
    'settings': wsh.Settings,
    'refresh': wsh.Refresher,
    'montraps': wsh.TrapsMonitoring,
    'fdbs': wsh.FdbParser,
    'portstate': wsh.PortState,
    'cable': wsh.CableDiagnostic,
    'eqcable': wsh.EquipmentCableDiagnostic,
    'igmp': wsh.Igmp,
    'ipmac': wsh.IpMac,
    'lldp': wsh.LLDPParser,
    'alias': wsh.AliasSetup,
    'ping': wsh.Ping
}

@defer.inlineCallbacks
def setup_reactor():
    flask_setup()
    yield pg_conn.connect(makedsn(main_db))
    root = Data("", "text/plain")
    for key in PROTOCOLMAP:
        factory = WebSocketServerFactory("wss://localhost:%s" % config.ws_port)
        factory.protocol = PROTOCOLMAP[key]
        resource = WebSocketResource(factory)
        root.putChild(key, resource)
    site = Site(root)
    context_factory = SNIMap(
        Cache(HostDirectoryMap(FilePath(config.certificates_directory)))
    )
    reactor.listenSSL(config.ws_port, site, context_factory)
    traps_listener = TrapsListener()
    traps_listener.listen_traps(config.trap_ip)
    traps_listener.listen_messages(config.fifo_file)

if __name__ == '__main__':
    setup_reactor()
    import sys
    from twisted.python import log
    log.startLogging(sys.stdout)
    reactor.run()
I use the twistd -noy twisted_service.py command to run the twisted service. It worked on twisted 16.3.2; after upgrading to any later version I get the error:
Unhandled Error
Traceback (most recent call last):
  File "/home/kalombo/.virtualenvs/dev/local/lib/python2.7/site-packages/twisted/application/app.py", line 662, in run
    runApp(config)
  File "/home/kalombo/.virtualenvs/dev/local/lib/python2.7/site-packages/twisted/scripts/twistd.py", line 25, in runApp
    _SomeApplicationRunner(config).run()
  File "/home/kalombo/.virtualenvs/dev/local/lib/python2.7/site-packages/twisted/application/app.py", line 380, in run
    self.application = self.createOrGetApplication()
  File "/home/kalombo/.virtualenvs/dev/local/lib/python2.7/site-packages/twisted/application/app.py", line 445, in createOrGetApplication
    application = getApplication(self.config, passphrase)
--- <exception caught here> ---
  File "/home/kalombo/.virtualenvs/dev/local/lib/python2.7/site-packages/twisted/application/app.py", line 456, in getApplication
    application = service.loadApplication(filename, style, passphrase)
  File "/home/kalombo/.virtualenvs/dev/local/lib/python2.7/site-packages/twisted/application/service.py", line 412, in loadApplication
    application = sob.loadValueFromFile(filename, 'application')
  File "/home/kalombo/.virtualenvs/dev/local/lib/python2.7/site-packages/twisted/persisted/sob.py", line 177, in loadValueFromFile
    eval(codeObj, d, d)
  File "twisted_service.py", line 3, in <module>
    from .application import setup_reactor
exceptions.ImportError: No module named application
How should I run twistd or import the module properly?
The answer I found is close to yours:

import os
import sys

sys.path = [os.path.join(os.getcwd(), '.'), ] + sys.path

Just add the current working directory to sys.path. I have not found a better method, though I don't think this one is very good.
I found the answer here: http://twistedmatrix.com/pipermail/twisted-python/2016-September/030783.html
It is a behavior change in Twisted 16.4.0. In previous versions the twistd script automatically added the working directory to the system path; from 16.4.0 on I have to add it manually, with something like this in the twisted_service.py file:

import os
import sys

TWISTED_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(TWISTED_DIR)
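Putting it together, a sketch of twisted_service.py with the fix applied (this also assumes switching the relative import to an absolute one, since the tac file is evaluated as a script rather than as a package module):

import os
import sys

# Re-add the tac file's directory to sys.path; twistd stopped doing this in 16.4.0.
TWISTED_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(TWISTED_DIR)

from twisted.application import service
from application import setup_reactor  # absolute import now resolves

class WebsocketService(service.Service):

    def startService(self):
        service.Service.startService(self)
        setup_reactor()

application = service.Application("ws")
ws_service = WebsocketService()
ws_service.setServiceParent(application)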

RethinkDB connect AttributeError

I'm trying to make a wrapper module for the RethinkDB API and I've come across an AttributeError when importing my class (it lives in a file called rethinkdb.py). I'm working in a virtual machine with a shared folder 'Github'.
I do this in IPython console:
import library.api.rethinkdb as re
This is the error:
Traceback (most recent call last):
  File "", line 1, in <module>
    import library.api.rethinkdb as re
  File "/media/sf_GitHub/library/api/rethinkdb.py", line 51, in <module>
    conn = Connection().connect_to_database()
  File "/media/sf_GitHub/library/api/rethinkdb.py", line 48, in connect_to_database
    raise e
AttributeError: 'module' object has no attribute 'connect'
This is the code:
import rethinkdb as r  # The downloaded RethinkDB module from http://rethinkdb.com/

class Connection(object):

    def __init__(self, host='127.0.0.1', port=28015, database=None, authentication_key=''):
        self.host = host
        self.port = port
        if database is None:
            self.db = 'test'
        self.auth_key = authentication_key

    def connect_to_database(self):
        try:
            conn = r.connect(self.host, self.port, self.db, self.auth_key)
        except Exception, e:
            raise e
        return conn

conn = Connection().connect_to_database()
I ran into something similar today and noticed the authors changed the basic behavior of the API in later versions.
From what I have tested on my machine:

v2.3.0:
import rethinkdb as r
r.connect()

v2.4.1:
import rethinkdb as r
rdb = r.RethinkDB()
rdb.connect()
It worked for me when I ran:
import rethinkdb as rdb
r = rdb.RethinkDB()
r.connect('localhost', 28015).repl()
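If the wrapper needs to work with both driver generations, a small version-agnostic shim is possible (a sketch, assuming the only difference is the 2.3/2.4 API change described above):

import rethinkdb

# Newer drivers (>= 2.4) expose a RethinkDB class; older ones expose
# module-level functions such as connect() directly.
try:
    r = rethinkdb.RethinkDB()
except AttributeError:
    r = rethinkdb

conn = r.connect('localhost', 28015).repl()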

Starting a database connection with sqlanydb inside a fork

Based on an example for forking, I built this little script:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlanydb
import os

def child():
    conn = sqlanydb.connect(uid='dba', pwd='sql', eng='somedb_IQ', dbn='somedb')
    curs = conn.cursor()
    curs.execute("""SELECT * FROM foobaa;""")
    os.exit(0)

def parent():
    while True:
        newpid = os.fork()
        if newpid == 0:
            child()
        else:
            pids = (os.getpid(), newpid)
            print "parent: %d, child: %d" % pids
        if raw_input() == 'q':
            break

parent()
The intention is to do the database action inside a separate process (the bigger goal later is to run a huge number of queries at the same time).
But when running the script, I'm getting:
parent: 20580, child: 20587
Traceback (most recent call last):
  File "connectiontest.py", line 25, in <module>
    parent()
  File "connectiontest.py", line 19, in parent
    child()
  File "connectiontest.py", line 8, in child
    conn = sqlanydb.connect(uid='dba', pwd='sql', eng='somedb_IQ', dbn='somedb')
  File "/usr/local/lib/python2.6/dist-packages/sqlanydb.py", line 461, in connect
    return Connection(args, kwargs)
  File "/usr/local/lib/python2.6/dist-packages/sqlanydb.py", line 510, in __init__
    self.handleerror(*error)
  File "/usr/local/lib/python2.6/dist-packages/sqlanydb.py", line 520, in handleerror
    eh(self, None, errorclass, errorvalue)
  File "/usr/local/lib/python2.6/dist-packages/sqlanydb.py", line 342, in standardErrorHandler
    raise errorclass(errorvalue)
sqlanydb.OperationalError: Failed to initialize connection object
What might I have missed?
Since Sybase IQ is based on Sybase ASA, are you sure you're using the proper keys for the credentials? This (albeit old) documentation looks like it wants DSN and DSF instead of ENG and DBN:
http://dcx.sybase.com/1101/en/dbprogramming_en11/python-writing-open.html
The issue does not seem to happen when import sqlanydb is moved into the child() method. It would look something like this:
def child():
    import sqlanydb
    conn = sqlanydb.connect(uid='dba', pwd='sql', dsn='some_db')
    curs = conn.cursor()
    curs.execute("""SELECT * FROM SA100_1_1;""")
    curs.close()
    conn.close()
You need to hack the sqlanydb source to print out the actual error being seen. Whatever the problem is, it is being masked by a generic OperationalError, which does not give enough information to fix it. Line 510 is where you need to add a couple of prints to figure out what is (not) going on.
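If editing the installed package is undesirable, a similar effect can be had by wrapping the module-level error handler at runtime (a sketch, assuming the (connection, cursor, errorclass, errorvalue) signature visible in the traceback above, and that new connections pick up the patched handler):

import sqlanydb

_orig_handler = sqlanydb.standardErrorHandler

def verbose_handler(connection, cursor, errorclass, errorvalue):
    # Surface the underlying error before the generic exception is raised.
    print "sqlanydb error: %s: %s" % (errorclass, errorvalue)
    _orig_handler(connection, cursor, errorclass, errorvalue)

sqlanydb.standardErrorHandler = verbose_handler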

File Too Large to import?

I just wrote this code to fetch the wireshark mac oui database, and I get the following error:
Traceback (most recent call last):
  File "init.py", line 38, in <module>
    main()
  File "init.py", line 27, in main
    import maclist
  File "/home/synthetica/WiJam/maclist.py", line 23753
    'FC:F6:4
However, this is NOT the contents of the file at that line. Is this a limit of the Python interpreter, something I'm overlooking, or something else?
init.py:
def main():
    # init
    # Load config.
    import localconfig
    print localconfig.name
    # update mac address db, if at all possible:
    try:
        import maclist
    except:
        import urllib2
        print "Fetching MAC address db."
        try:
            maclist = urllib2.urlopen(localconfig.url)
        except urllib2.URLError:
            raise
        else:
            fl = open("maclist.py", "w")
            fl.write("#maclist.py generated by " + localconfig.name + "\n")
            print "Generating maclist.py"
            for line in maclist:
                if "#" in line:
                    line = line[:line.index("#")]
                line = line.split()
                if line and "-" not in line[0]:
                    line = [repr(part) for part in line]
                    line = "=".join(line)
                    fl.write("=".join(line.split()) + "\n")
            import maclist
    # start browser
    # start web interface
    # handle web interface commands
    # display web interface

if __name__ == "__main__":
    main()
localconfig.py
version = "0.3"
name = "Synth's WiJam (version "+version+")"
#maclist related:
url = "https://code.wireshark.org/review/gitweb?p=wireshark.git;a=blob_plain;f=manuf;hb=HEAD"
Any leads?

@bren: maclist.py? Not the full thing, heavens no. It's 20k+ lines:
'FC:E1:92'='SichuanJ'
'FC:E1:D9'='StableIm'
'FC:E2:3F'='ClayPaky'
'FC:E5:57'='Nokia'
'FC:E8:92'='Hangzhou'
'FC:ED:B9'='Arrayent'
'FC:F1:CD'='Optex-Fa'
'FC:F5:28'='ZyxelCom'
'FC:F6:47'='Fiberhom'
'FC:F8:AE'='IntelCor'
'FC:F8:B7'='TronteqE'
'FC:FA:F7'='Shanghai'
'FC:FB:FB'='Cisco'
Rewrite maclist.py to be proper Python syntax, for example:

hosts = {}
hosts['FC:FA:F7'] = 'Shanghai'
hosts['FC:FB:FB'] = 'Cisco'

and so on.
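The generation loop in init.py would then emit dictionary entries instead of bare name=value lines (a sketch reusing the question's variables, untested against the full manuf file):

fl = open("maclist.py", "w")
fl.write("#maclist.py generated by " + localconfig.name + "\n")
fl.write("hosts = {}\n")
for line in maclist:
    # Strip trailing comments from the manuf file.
    if "#" in line:
        line = line[:line.index("#")]
    parts = line.split()
    # Mirror the original filter: skip blank lines and ranged entries.
    if len(parts) >= 2 and "-" not in parts[0]:
        fl.write("hosts[%r] = %r\n" % (parts[0], parts[1]))
fl.close()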
