Xively: how to activate a device with the Python API?

Since COSM has become Xively, a nice device API has been added (or was always there, I'm not sure). The flow is:
create a product batch with serial numbers
activate devices using some product batch identifiers (?)
start using the device with the obtained feed/API keys
I can't figure out how to do this via the Python API. Are there any pointers?

This should be added to the library, but for now you can use this code to implement device activation. I have used environment variables to store the product secret and device serial, but change that to anything that suits your use case. The only tricky part is that you need to call a2b_hex() on the product secret before handing it to hmac.
import xively
from os import environ
from hashlib import sha1
from binascii import a2b_hex
import hmac
secret = environ['XIVELY_PRODUCT_SECRET']
serial = environ['XIVELY_DEVICE_SERIAL_NUMBER']
activation = hmac.new(a2b_hex(secret), serial, sha1).hexdigest()
creds = xively.Client(key=None).get('/v2/devices/'+activation+'/activate').json()
xi_feed = xively.XivelyAPIClient(creds['apikey']).feeds.get(creds['feed_id'])
You will also need to take care to store the credentials in a file, as a device can be activated only once. You will notice 403 errors if you run this code again and again, so use the Xively developer workbench to deactivate the device under test (you may need to refresh the page).
Here is a fully working example using config files or environment variables:
#!/usr/bin/python
from os import environ
from hashlib import sha1
from binascii import a2b_hex
import hmac
import sys, subprocess
import ConfigParser
import xively

CONFIG_FILE = 'xively.conf'
PROVISIONING = 'PROVISIONING'
PROVISIONING_PRODUCT_SECRET = 'PRODUCT_SECRET'
PROVISIONING_DEVICE_SERIAL = 'DEVICE_SERIAL'
PROVISIONING_FEED_ID = 'FEED_ID'
PROVISIONING_API_KEY = 'API_KEY'

def get_setting(config, section, key):
    try:
        value = config.get(section, key)
    except:
        print key + " not found in config file. Using environment variable " + key + " instead."
        try:
            value = environ[key]
        except:
            print key + " not found in environment."
            raise
    # value defined?
    if not value:
        raise ValueError(key + " is empty")
    return value

def xively_activate_product(secret, serial):
    activation = hmac.new(a2b_hex(secret), serial, sha1).hexdigest()
    creds = xively.Client(key=None).get('/v2/devices/' + activation + '/activate').json()
    return creds

# main
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
try:
    # see if we already have an api key and feed id
    feed_id = config.get(PROVISIONING, PROVISIONING_FEED_ID)
    api_key = config.get(PROVISIONING, PROVISIONING_API_KEY)
    print "Provisioned product details:"
    print "FEED_ID: " + str(feed_id)
    print "API_KEY: " + api_key
    # continue working with your activated product here
except:
    print "FEED_ID and API_KEY not found. Activating product now."
    # no error handling for secret - it _is_ needed
    try:
        secret = get_setting(config, PROVISIONING, PROVISIONING_PRODUCT_SECRET)
    except:
        print "Finding " + PROVISIONING_PRODUCT_SECRET + " failed. Giving up."
        sys.exit(1)
    try:
        serial = get_setting(config, PROVISIONING, PROVISIONING_DEVICE_SERIAL)
    except:
        serial = subprocess.check_output("hostname", shell=True)
        if not serial:
            print "Fallback to hostname for " + PROVISIONING_DEVICE_SERIAL + " failed. Giving up."
            sys.exit(1)
    try:
        creds = xively_activate_product(secret, serial)
        # check if there were errors
        try:
            creds["errors"]
        except KeyError:
            pass
        else:
            print "Product activation failed (" + creds["title"] + ": " + creds["errors"] + ")."
            sys.exit(1)
        feed_id = creds['feed_id']
        api_key = creds['apikey']
        print "Product activation successful."
        print "FEED_ID: " + str(feed_id)
        print "API_KEY: " + api_key
        if not config.has_section(PROVISIONING):
            config.add_section(PROVISIONING)
        config.set(PROVISIONING, PROVISIONING_FEED_ID, feed_id)
        config.set(PROVISIONING, PROVISIONING_API_KEY, api_key)
        # write the credentials back to CONFIG_FILE
        with open(CONFIG_FILE, 'wb') as configfile:
            config.write(configfile)
    except Exception as e:
        print "Product activation failed (" + str(e) + ")."
        sys.exit(1)
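For reference (not part of the original answer), the script reads a xively.conf like the following and writes FEED_ID and API_KEY back into it after activation; the values here are placeholders:
[PROVISIONING]
product_secret = 0123456789abcdef0123456789abcdef
device_serial = my-serial-001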

This is another helpful class I wrote:
## Logging for debugging purposes
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

import os
from os import environ
from hashlib import sha1
from binascii import a2b_hex
import hmac
import sys, subprocess
import ConfigParser
import xively

PROVISIONING = 'PROVISIONING'
PROVISIONING_PRODUCT_SECRET = 'PRODUCT_SECRET'
PROVISIONING_FEED_ID = 'FEED_ID'
PROVISIONING_API_KEY = 'API_KEY'

class XivelyManager:
    def __init__(self, settings="xively.conf"):
        self.settings = settings
        self.config = ConfigParser.RawConfigParser()
        self.config.read(settings)
        try:
            # see if we already have an api key and product secret
            self.api_key = self.get_setting(PROVISIONING, PROVISIONING_API_KEY)
            self.secret = self.get_setting(PROVISIONING, PROVISIONING_PRODUCT_SECRET)
            # continue working with your activated product here
        except:
            logger.exception("API KEY and SECRET NOT FOUND")

    def activate_sensor(self, serial):
        try:
            creds = self.xively_activate_product(str(serial))
            # check if there were errors
            try:
                creds["errors"]
            except KeyError:
                pass
            else:
                logger.error("Product activation failed (" + creds["title"] + ": " + creds["errors"] + ").")
                return False
            feed_id = creds['feed_id']
            api_key = creds['apikey']
            if not self.config.has_section(PROVISIONING):
                self.config.add_section(PROVISIONING)
            if not self.config.has_section(str(serial)):
                self.config.add_section(str(serial))
            self.config.set(PROVISIONING, PROVISIONING_API_KEY, api_key)
            self.config.set(str(serial), PROVISIONING_FEED_ID, feed_id)
            # write the configuration file back to disk
            with open(self.settings, 'wb') as configfile:
                self.config.write(configfile)
            return True
        except Exception as e:
            logger.exception("Product activation failed (" + str(e) + ").")
            return False

    def get_setting(self, section, key):
        value = None
        try:
            value = self.config.get(section, key)
        except:
            logger.exception(key + " not found in config file.")
##            try:
##                value = environ[key]
##            except:
##                logger.exception(key + " not found in environment.")
##            finally:
##                pass
        finally:
            # value defined?
            if not value:
                raise ValueError(key + " is not set")
        return value

    def get_feed(self, serial):
        try:
            if self.config.has_section(str(serial)):
                feed_id = self.get_setting(str(serial), PROVISIONING_FEED_ID)
            else:
                feed_id = False
        except Exception:
            feed_id = False
        finally:
            return feed_id

    def xively_activate_product(self, serial):
        activation = hmac.new(a2b_hex(self.secret), serial, sha1).hexdigest()
        creds = xively.Client(key=None).get('/v2/devices/' + activation + '/activate').json()
        return creds

if __name__ == "__main__":
    print "Testing Xively Manager"
    settings = os.path.join(os.path.dirname(sys.argv[0]), "config", "xively.conf")
    print settings
    testxive = XivelyManager(settings)
    #print testxive.activate_sensor(10)
    print testxive.get_feed(10)
This is helpful when your internet gateway is connected to several other devices.
Your config file will be updated with stuff like this:
[PROVISIONING]
product_secret = xxxxxxxxxxxxxxxxxxxxxxxxxxxx
api_key = xxxxxxxxxxxxxxxxxxxxxxxx
[productserial1]
feed_id = xxxxxxxx
[productserial2]
feed_id = xxxxxxxx
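A quick usage sketch (not from the original answer; the serial numbers are made up) showing how a gateway might look up, or first activate, the feed for each attached device:
manager = XivelyManager("xively.conf")
for serial in ["productserial1", "productserial2"]:
    feed = manager.get_feed(serial)
    if not feed:
        # first time this device is seen: activate it, then re-read the feed
        if manager.activate_sensor(serial):
            feed = manager.get_feed(serial)
    print "device %s -> feed %s" % (serial, feed)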

Related

Python Imap.IMAP4_SSL Authenticate email and password in combolist error

Hello, I need help with my code. It keeps giving me authentication errors.
Can you check it out for me?
All I need is for the code to authenticate successfully, save the working logins in one txt file, and the bad logins (wrong password) in another txt file. It works with SMTP but keeps giving me an error on IMAP.
See the code below.
Thanks
The logins in accounts.txt are in the following format email:password
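For example, accounts.txt might contain (made-up credentials):
alice@example.com:hunter2
bob@example.org:s3cret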
...
import imaplib
import ssl
import socket
import getpass
import re
import socks
import codecs
import unicodedata
import random
from multiprocessing.pool import ThreadPool

# PROXY_TYPE_HTTP
# PROXY_TYPE_SOCKS5
proxy_type = socks.PROXY_TYPE_HTTP
use_proxies = False
thread_count = 1
use_encryption = False

accounts = []
accounts_checked = 0
accounts_valid = []
accounts_invalid = []
proxies = []

def check_account(email, password):
    try:
        if use_proxies:
            proxy = random.choice(proxies)
            proxy_host = proxy.split(':')[0]
            proxy_port = int(proxy.split(':')[1])
            socks.setdefaultproxy(proxy_type, proxy_host, proxy_port)
            socks.wrapmodule(imaplib)
        mailserver = imaplib.IMAP4_SSL(('mail.' + re.search(r'@((\w|\w[\w\-]*?\w)\.\w+)', email).group(1)), 993)
        mailserver.login(str(email), str(password))
        mailserver.close()
        return True
    except imaplib.IMAP4.error:
        print("Log in failed.")
        return False

def get_status(account):
    global accounts_checked, accounts
    if ':' not in account:
        return False
    email = account.split(':')[0]
    password = account.split(':')[1]
    valid = check_account(email, password)
    if valid:
        print("Valid: ", account)
        f1 = open("connect.txt", "a+")
        f1.write(account)
        f1.close()
        accounts_valid.append(account)
    else:
        f2 = open("not_connect.txt", "a+")
        f2.write(account)
        f2.close()
        accounts_invalid.append(account)
    accounts_checked += 1
    print("(" + str(accounts_checked) + "/" + str(len(accounts)) + ")")
    return valid

if __name__ == "__main__":
    if use_proxies:
        print("Reading \"proxies.txt\"...")
        with open("proxies.txt") as f:
            for line in f:
                if ':' in line:
                    proxies.append(line)
        print("Found " + str(len(proxies)) + " proxies.")
    print("Reading \"accounts.txt\"...")
    with codecs.open("accounts.txt", encoding='utf-8') as f:
        for line in f:
            line = unicodedata.normalize('NFKD', line).encode('ascii', 'ignore').decode('ascii')
            if ':' in line:
                accounts.append(line.replace("\n", "").replace("\t", ""))
    print("Found " + str(len(accounts)) + " accounts.")
    print("Creating thread pool...")
    pool = ThreadPool(thread_count)
    results = pool.map(get_status, accounts)
    pool.close()
    pool.join()
    print("Done checking, writing output...")
    print("Completed!")
...
You should create a minimal example. In my case I can log in using imaplib without wrapping the module in any socket code at all; aren't the SSL sockets automatic with IMAP4_SSL?
def get_mail_client(email_address):
    print(password)
    mail = imaplib.IMAP4_SSL(SMTP_SERVER, SMTP_PORT)
    mail.login(email_address, password)
    return mail

def start(name):
    # Use a breakpoint in the code line below to debug your script.
    mailClient = get_mail_client(EMAIL)
    status, messages = mailClient.select('INBOX')
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.
    print(messages)
    print(messages[0])
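For reference, here is a self-contained version of that sketch. SMTP_SERVER, SMTP_PORT, EMAIL and password are undefined in the snippet above, so the host and credentials below are hypothetical placeholders:
import imaplib

IMAP_SERVER = "imap.example.com"  # placeholder host
IMAP_PORT = 993

def check_login(email_address, password):
    try:
        # IMAP4_SSL wraps the connection in SSL itself; no socket code needed
        mail = imaplib.IMAP4_SSL(IMAP_SERVER, IMAP_PORT)
        mail.login(email_address, password)
        mail.logout()
        return True
    except imaplib.IMAP4.error:
        return False

print(check_login("user@example.com", "secret"))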

Passing variables in python to another web platform

I have code which needs to pass the latency, upspeed and dlspeed values to another web site to display. Right now the code is as below:
import datetime
import os
import sys
import shutil
import webbrowser
import tempfile
import subprocess
import json
import urllib.request
import statistics
import pymysql
import pymysql.cursors

IPERF3_WIN_PATH = "data/iperf3.exe"
HTML_TEMPLATE_PATH = "data/template.html"
IPERF3_HOST = "127.0.0.1"
RESULT_UPLOAD_URL = "UPLOAD URL"
RESULT_VIEW_URL = "VIEW URL"

def resource_path(relative_path):
    """ Get absolute path to resource, works for dev and for PyInstaller
    This is to get a path which will work with pyinstaller
    """
    try:
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
    except Exception:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)

def ping(ip, tries):
    """ Ping "ip" using the windows ping command
    Return the average ping as an int
    """
    res = 0
    try:
        output = subprocess.check_output(
            ["ping", "-n", str(tries), ip]).decode("utf-8")
        res = int(output.split(" = ")[-1].split("ms")[0])
    except subprocess.CalledProcessError:
        input("Press Enter to Continue...")
        sys.exit("Error while trying to ping the server, exiting")
    else:
        return res

def copyIperf3Exec():
    """ On OSX :
    Copy the iperf3 binary to a tmp file,
    make it executable and return its path
    This is to avoid many bundle related problems
    On Windows, just return the package path """
    return resource_path(IPERF3_WIN_PATH)

def get_iperf3_download():
    """ Return the output of the iperf3 cli as a python dict """
    ipf3_tmp = copyIperf3Exec()
    try:
        output = subprocess.check_output([ipf3_tmp,
                                          "-c", IPERF3_HOST,
                                          "-J",
                                          "-P", "16",
                                          "-w", "710000",
                                          "-R"])
        res_string = output.decode("utf-8")
    except subprocess.CalledProcessError:
        input("Press Enter to Continue...")
        sys.exit("Problem while doing the test, please try again later")
    else:
        return json.loads(res_string)

def get_iperf3_upload():
    """ Return the output of the iperf3 cli as a python dict """
    ipf3_tmp = copyIperf3Exec()
    try:
        output = subprocess.check_output([ipf3_tmp,
                                          "-c", IPERF3_HOST,
                                          "-J",
                                          "-P", "10",
                                          "-w", "710000"])
        res_string = output.decode("utf-8")
    except subprocess.CalledProcessError:
        input("Press Enter to Continue...")
        sys.exit("Error while doing the upload test, please try again later")
    else:
        return json.loads(res_string)

def get_userinfos():
    """ Get the 3 pieces of information to be presented to the user
    ( ip, upload speed, download speed )
    Return a Dictionary
    """
    show_start_msg(0)  # 0% Progress bar
    avg_latency = ping(IPERF3_HOST, 5)
    u_json = get_iperf3_upload()
    show_start_msg(1)  # 40%
    d_json = get_iperf3_download()
    show_start_msg(2)  # 80%
    ip = getip_apify()
    u_bits_per_second = u_json['end']['sum_received']['bits_per_second']
    d_bits_per_second = d_json['end']['sum_received']['bits_per_second']
    u_testtime = u_json['end']['sum_received']['seconds']
    d_testtime = d_json['end']['sum_received']['seconds']
    u_testdate = u_json["start"]["timestamp"]["timesecs"]
    d_testdate = d_json["start"]["timestamp"]["timesecs"]
    res = {
        'ip': ip,
        'latency': avg_latency,
        'upspeed': u_bits_per_second,
        'dlspeed': d_bits_per_second,
        'upspeedtime': u_testtime,
        'dlspeedtime': d_testtime,
        'upspeeddate': u_testdate,
        'dlspeeddate': d_testdate
    }
    return res

def sendToDB(infos):
    # Connect to the database
    connection = pymysql.connect(host='127.0.0.1',
                                 user='testclient',
                                 password='password',
                                 db='speed',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    try:
        with connection.cursor() as cursor:
            # Create a new record
            def stp_date(stp):
                return datetime.datetime.fromtimestamp(stp).strftime(
                    '%Y-%m-%d %H:%M:%S')
            sql = ("INSERT INTO `speedlog`"
                   "(`externalIP`, `uploadspeed`, `uploadspeedtime`,"
                   "`uploadspeeddate`, `downloadspeed`, `downloadspeedtime`,"
                   "`downloadspeeddate`, `latency`)"
                   "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
            cursor.execute(sql,
                           (infos["ip"],
                            str(int(infos["upspeed"])),
                            str("{0:.2f}".format(infos["upspeedtime"])),
                            stp_date(infos["upspeeddate"]),
                            str(int(infos["dlspeed"])),
                            str("{0:.2f}".format(infos["dlspeedtime"])),
                            stp_date(infos["dlspeeddate"]),
                            str(int(infos["latency"]))))
        # connection is not autocommit by default,
        # so you must commit to save your changes.
        connection.commit()
    finally:
        connection.close()
    return

def getip_apify():
    res = urllib.request.urlopen("http://api.ipify.org")
    raw_ip = res.read()
    return raw_ip.decode('utf-8')

def prepare_template(templatePath, infos):
    """ Load an html located at templatePath and replace the necessary text
    with the associated values from the iPerf3 infos
    Return a string
    """
    f_template = open(templatePath)
    s_template = f_template.read()
    f_template.close()
    mod_template = s_template.replace("avglatency", str(int(infos['latency'])))
    mod_template = mod_template.replace(
        "upspeed", str("{0:.3f}".format(infos['upspeed']/(1000*1000*1000))))
    mod_template = mod_template.replace(
        "dlspeed", str("{0:.3f}".format(infos['dlspeed']/(1000*1000*1000))))
    return mod_template

def str_to_tempHtml(str):
    """ Write "str" in an .html temporary file
    And return its path
    """
    data = bytes(str, "utf-8")
    tmp = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
    tmp.write(data)
    tmp.flush()
    return tmp.name

def show_start_msg(progress):
    if sys.platform.startswith('darwin'):
        unused = os.system('clear')
    elif sys.platform.startswith('win32'):
        unused = os.system('cls')
    print("="*70)
    print("Speed Testing for 10G Network \n")
    print("Powered by iPerf3")
    print("="*70)
    if progress == -1:
        input("Press Enter to Continue...\n")
        return
    else:
        print("Press Enter to Continue...\n")
    print("Testing in progress")
    if progress == 0:
        print("[" + " "*68 + "]" + " 0%")
    elif progress == 1:
        print("[" + "#" * 27 + " " * 41 + "]" + " 40%")
    elif progress == 2:
        print("[" + "#" * 54 + " " * 14 + "]" + " 80%")
    elif progress == 3:
        print("[" + "#"*68 + "]" + " 100%")
        print("Completed")

if __name__ == '__main__':
    show_start_msg(-1)
    infos = get_userinfos()
    sendToDB(infos)
    show_start_msg(3)  # 100% Complete
    data = {"key": "Jasdkjfhsda349*lio34sdfFdslaPisdf",
            "download": "2048000",
            "upload": "2048000",
            "latency": "10"}
    req = urllib.request.Request(RESULT_UPLOAD_URL, json.dumps(data).encode(
        'ascii'))
    req.add_header('Content-Type', 'application/json')
    resp = urllib.request.urlopen(req).read().decode('ascii')
    resp = resp.replace('\'', '"')
    webbrowser.open(RESULT_VIEW_URL.format(json.loads(resp)['test_id']))
    input("Press Enter to Continue...")
My latency, upspeed and dlspeed variables are stored in infos, which is later sent over to the DB for recording via sendToDB(infos).
The next part is to also pass these variables to another web site using REST. In data, the first attribute "key" is the REST key for authentication, followed by the rest of the values like latency, download speed and upload speed. However, you can see that in data all three variables are hard-coded values instead of the values derived from the test: latency, upspeed and dlspeed.
How can I modify the code to send these attributes instead of the hardcoded ones?
You have a method that returns this dictionary...
res = {
    'ip': ip,
    'latency': avg_latency,
    'upspeed': u_bits_per_second,
    'dlspeed': d_bits_per_second,
    'upspeedtime': u_testtime,
    'dlspeedtime': d_testtime,
    'upspeeddate': u_testdate,
    'dlspeeddate': d_testdate
}
And its result is stored in infos, so use it:
data = {"key": "xxxxxxxx",
        "download": infos['dlspeed'],
        "upload": infos['upspeed'],
        "latency": infos['latency']}

Multiprocessing Python Bottle Multiple Commands independent of a running pool.apply_async

The problem: I made a REST API out of bottle to start Elasticsearch bulk loads. The bulk load process runs inside a multiprocessing pool; the problem is that while it is running, the API won't accept any other commands.
I've even tried running another instance of the API on a different port (I have a separate question open about that too); the second one doesn't accept commands either.
I want to be able to call a different API command to get the status of the load and return it. Currently it just includes ES data, but eventually it's going to include each node's stats. This is designed to run from Jenkins and initiate parallel loads.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright [current year] the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import RequestError
from subprocess import Popen, PIPE
from multiprocessing import Pool, Process, pool
from datetime import datetime
import boto3
import sys
import os
import argparse
import logging
import logging.config
from bottle import route, run
from boto.cloudformation.stack import Output
import json

# this is what is called to set up the loading process from the api.
def start_load(secret, access, protocol, host, ports, index, type, mapping, data, threads):
    # decompress a gzip string
    def decompress_gzip(data):
        return Popen(['zcat'], stdout=PIPE, stdin=PIPE).communicate(input=data)[0]

    # parse an s3 path into a bucket and key:
    # 's3://my-bucket/path/to/data' -> ('my-bucket', 'path/to/data')
    def parse_s3_path(str):
        _, _, bucket, key = str.split('/', 3)
        return (bucket, key)

    def shell_command_execute(command):
        p = Popen(command, stdout=PIPE, shell=True)
        (output, err) = p.communicate()
        return output

    # load an S3 file to elasticsearch
    def load_s3_file(s3_bucket, s3_key, es_host, es_port, es_index, es_type, access, secret):
        try:
            logging.info('loading s3://%s/%s', s3_bucket, s3_key)
            s3 = boto3.client('s3', aws_access_key_id=access, aws_secret_access_key=secret)
            file_handle = s3.get_object(Bucket=s3_bucket, Key=s3_key)
            file_contents = file_handle['Body'].read()
            logging.info('%s' % s3_key)
            if file_contents:
                if s3_key.endswith('.gz'):
                    file_contents = decompress_gzip(file_contents)
                es = Elasticsearch(host=es_host, port=es_port, timeout=180)
                es.bulk(body=file_contents, index=es_index, doc_type=es_type, timeout=120)
        except Exception as e:
            logging.error("There has been a major error %s" % e)

    # load a single S3 file to elasticsearch
    def load_single_s3_file(s3_bucket, s3_key, es_host, es_port, es_index, es_type, access, secret):
        try:
            logging.info('loading s3://%s/%s', s3_bucket, s3_key)
            s3 = boto3.client('s3', aws_access_key_id=access, aws_secret_access_key=secret)
            file_handle = s3.get_object(Bucket=s3_bucket, Key=s3_key)
            file_contents = file_handle['Body'].read()
            logging.info('%s' % s3_key)
            if file_contents:
                if s3_key.endswith('.gz'):
                    file_contents = decompress_gzip(file_contents)
                es = Elasticsearch(host=es_host, port=es_port, timeout=180)
                res = es.get(index="test-index", doc_type='tweet', id=1)
                es.insert(body=file_contents, index=es_index, doc_type=es_type, timeout=120)
        except Exception as e:
            logging.error("There has been a major error %s" % e)

    start = datetime.now()
    es_url = protocol + '://' + host + ':' + str(ports) + '/' + index + '/' + type
    es = Elasticsearch(host=host, port=ports, timeout=180)
    # S3 file - https://boto3.readthedocs.org/en/latest/reference/services/s3.html#object
    s3 = boto3.client('s3', aws_access_key_id=access, aws_secret_access_key=secret)
    s3_bucket, s3_key = parse_s3_path(mapping)
    file_handle = s3.get_object(Bucket=s3_bucket, Key=s3_key)
    mapping = file_handle['Body'].read()
    try:
        es.indices.create(index=index, body=mapping)
    except:
        logging.error('index exists')
    logging.info('starting to load %s to %s', data, es_url)
    es.indices.put_settings({'index': {'refresh_interval': '-1'}}, index=index)
    pool = Pool(processes=int(threads))
    s3 = boto3.resource('s3', aws_access_key_id=access, aws_secret_access_key=secret)
    s3_bucket, s3_key = parse_s3_path(data)
    for file_summary in s3.Bucket(s3_bucket).objects.all():
        if file_summary.key.startswith(s3_key):
            pool.apply_async(load_s3_file, args=(s3_bucket, file_summary.key, host, ports, index, type, access, secret))
    pool.close()
    pool.join()
    es.indices.put_settings({'index': {'refresh_interval': '1s'}}, index=index)
    logging.info('finished loading %s to %s in %s', data, es_url, str(datetime.now() - start))
    sys.exit(0)
    #reset_es_settings(host, ports)

# This is what is called when no arguments are given
@route('/load_data/')
def no_commands():
    return """Please include all necessary values: example:
Start Load
http://127.0.0.1:8001/load_data/load&host=ip or DNS&thread=5&mappinglocation=tr-ips-ses-data|mappings|version_1_2|wos.mapping&datalocation=tr-ips-ses-data|json-data|wos|20150724|wos-1&port=9200&index=wos4&protocol=http&type=wos&access=access_key&secret=secret_key
Delete Index
http://127.0.0.1:8001/delete/wos4&host=ip or DNS&port=9200
with loading you must specify the load command as shown above
use & to separate values
use = to separate key value pairs
use | to insert \\
"""

@route('/load_data/<name>', method='GET')
def commands(name="Execute Load"):
    values = name.split('&')
    # split apart the url syntax: items are split by &, key values by =,
    # and any place that needs / gets |
    try:
        command = values[0]
        host = values[1] + ".us-west-2.elb.amazonaws.com"
        threads = values[2]
        mapping_location = values[3].replace('|', '/')
        data_location = values[4].replace('|', '/')
        #mapping_location = values[3]
        #data_location = values[4]
        ports = values[5]
        index = values[6]
        protocol = values[7]
        type = values[8]
        access = values[9]
        secret = values[10]
        host = host.split('=')[1]
        threads = threads.split('=')[1]
        mapping_location = "s3://" + mapping_location.split('=')[1]
        data_location = "s3://" + data_location.split('=')[1]
        ports = ports.split('=')[1]
        index = index.split('=')[1]
        protocol = protocol.split('=')[1]
        types = type.split('=')[1]
        access = access.split('=')[1]
        secret = secret.split('=')[1]
        yield ("Starting Load of data use /get_status/es_url&es_port&index to get the status of your load.")
        start_load(secret, access, protocol, host, ports, index, types, mapping_location, data_location, threads)
    except Exception as e:
        logging.error(e)
        yield """Please include all necessary values: example:
Start Load
http://127.0.0.1:8001/load_data/load&host=ip or DNS&thread=5&mappinglocation=tr-ips-ses-data|mappings|version_1_2|wos.mapping&datalocation=tr-ips-ses-data|json-data|wos|20150724|wos-1&port=9200&index=wos4&protocol=http&type=wos&access=access_key&secret=secret_key
Delete Index
http://127.0.0.1:8001/delete/wos4&host=ip or DNS&port=9200
with loading you must specify the load command as shown above
use & to separate values
use = to separate key value pairs
use | to insert \\
"""

# This is what is called when /delete/ is used.
@route('/delete/<name>', method='GET')
def recipe_delete(name="Delete Index"):
    def shell_command_execute(command):
        p = Popen(command, stdout=PIPE, shell=True)
        (output, err) = p.communicate()
        return output
    values = name.split('&')
    try:
        # split apart the url syntax: items are split by & and key values by |
        index = values[0]
        host = values[1] + ".us-west-2.elb.amazonaws.com"
        host = host.split('=')[1]
        port = values[2]
        port = port.split('=')[1]
    except Exception as e:
        logging.error(e)
        return """Please include all necessary values: example:
Start Load
http://127.0.0.1:8001/load_data/load&host=ip or DNS&thread=5&mappinglocation=tr-ips-ses-data|mappings|version_1_2|wos.mapping&datalocation=tr-ips-ses-data|json-data|wos|20150724|wos-1&port=9200&index=wos4&protocol=http&type=wos&access=access_key&secret=secret_key
Delete Index
http://127.0.0.1:8001/delete/wos4&host=ip or DNS&port=9200
with loading you must specify the load command as shown above
use & to separate values
use = to separate key value pairs
use | to insert \\
"""
    try:
        # This is the command that deletes the index.
        curl_command = 'curl -XDELETE http://' + host + ':9200/' + index
        shell_command_execute(curl_command)
        return "Successfully Deleted Index"
    except Exception as e:
        logging.error(e)
        return "Failed to Delete Index %s" % e

if __name__ == '__main__':
    reload(sys)
    sys.setdefaultencoding('utf8')
    url = os.path.dirname(os.path.realpath(__file__)) + '/logging.ini'
    print url
    logging.config.fileConfig(url)
    run(host='172.31.28.189', port=8001, debug=True)
    #run(host='127.0.0.1', port=8001, debug=True)
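A minimal sketch of one way around this (hypothetical route names; not from the original post): hand the long-running load to a background multiprocessing.Process so bottle's single-threaded default server can keep serving requests, and poll a separate status route:
from bottle import route, run
from multiprocessing import Process

jobs = {}  # job name -> Process handle

def long_load(job_name):
    # stand-in for the blocking start_load() call above
    import time
    time.sleep(60)

@route('/start/<name>')
def start(name):
    p = Process(target=long_load, args=(name,))
    p.start()  # returns immediately; the load continues in the child
    jobs[name] = p
    return "started %s; poll /status/%s" % (name, name)

@route('/status/<name>')
def status(name):
    p = jobs.get(name)
    if p is None:
        return "no such job"
    return "running" if p.is_alive() else "done"

run(host='127.0.0.1', port=8001)
Note that start_load() as written also calls sys.exit(0) when it finishes, which would kill whatever process runs it; in a background-process design the child simply returns instead.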

Python script is exiting with no output and I have no idea why

I'm attempting to debug a Subversion post-commit hook that calls some python scripts. What I've been able to determine so far is that when I run post-commit.bat manually (I've created a wrapper for it to make it easier) everything succeeds, but when SVN runs it one particular step doesn't work.
We're using CollabNet SVNServe, which I know from the documentation removes all environment variables. This had caused some problems earlier, but shouldn't be an issue now.
Before Subversion calls a hook script, it removes all variables - including $PATH on Unix, and %PATH% on Windows - from the environment. Therefore, your script can only run another program if you spell out that program's absolute name.
The relevant portion of post-commit.bat is:
echo -------------------------- >> c:\svn-repos\company\hooks\svn2ftp.out.log
set SITENAME=staging
set SVNPATH=branches/staging/wwwroot/
"C:\Python3\python.exe" C:\svn-repos\company\hooks\svn2ftp.py ^
--svnUser="svnusername" ^
--svnPass="svnpassword" ^
--ftp-user=ftpuser ^
--ftp-password=ftppassword ^
--ftp-remote-dir=/ ^
--access-url=svn://10.0.100.6/company ^
--status-file="C:\svn-repos\company\hooks\svn2ftp-%SITENAME%.dat" ^
--project-directory=%SVNPATH% "staging.company.com" %1 %2 >> c:\svn-repos\company\hooks\svn2ftp.out.log
echo -------------------------- >> c:\svn-repos\company\hooks\svn2ftp.out.log
When I run post-commit.bat manually, for example: post-commit c:\svn-repos\company 12345, I see output like the following in svn2ftp.out.log:
--------------------------
args1: c:\svn-repos\company
args0: staging.company.com
abspath: c:\svn-repos\company
project_dir: branches/staging/wwwroot/
local_repos_path: c:\svn-repos\company
getting youngest revision...
done, up-to-date
--------------------------
However, when I commit something to the repo and it runs automatically, the output is:
--------------------------
--------------------------
svn2ftp.py is a bit long, so I apologize but here goes. I'll have some notes/disclaimers about its contents below it.
#!/usr/bin/env python
"""Usage: svn2ftp.py [OPTION...] FTP-HOST REPOS-PATH
Upload to FTP-HOST changes committed to the Subversion repository at
REPOS-PATH. Uses svn diff --summarize to only propagate the changed files
Options:
 -?, --help                  Show this help message.
 -u, --ftp-user=USER         The username for the FTP server. Default: 'anonymous'
 -p, --ftp-password=P        The password for the FTP server. Default: '#'
 -P, --ftp-port=X            Port number for the FTP server. Default: 21
 -r, --ftp-remote-dir=DIR    The remote directory that is expected to resemble the
                             repository project directory
 -a, --access-url=URL        This is the URL that should be used when trying to SVN
                             export files so that they can be uploaded to the FTP
                             server
 -s, --status-file=PATH      Required. This script needs to store the last
                             successful revision that was transferred to the
                             server. PATH is the location of this file.
 -d, --project-directory=DIR If the project you are interested in sending to
                             the FTP server is not under the root of the
                             repository (/), set this parameter.
                             Example: -d 'project1/trunk/'
                             This should NOT start with a '/'.
2008.5.2 CKS
Fixed possible Windows-related bug with tempfile, where the script didn't have
permission to write to the tempfile. Replaced this with a open()-created file
created in the CWD.
2008.5.13 CKS
Added error logging. Added exception for file-not-found errors when deleting files.
2008.5.14 CKS
Change file open to 'rb' mode, to prevent Python's universal newline support from
stripping CR characters, causing later comparisons between FTP and SVN to report changes.
"""
try:
    import sys, os
    import logging
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(message)s',
        filename='svn2ftp.debug.log',
        filemode='a'
    )
    console = logging.StreamHandler()
    console.setLevel(logging.ERROR)
    logging.getLogger('').addHandler(console)
    import getopt, tempfile, smtplib, traceback, subprocess
    from io import StringIO
    import pysvn
    import ftplib
    import inspect
except Exception as e:
    logging.error(e)
    # capture the location of the error
    frame = inspect.currentframe()
    stack_trace = traceback.format_stack(frame)
    logging.debug(stack_trace)
    print(stack_trace)
    # end capture
    sys.exit(1)

# defaults
host = ""
user = "anonymous"
password = "#"
port = 21
repo_path = ""
local_repos_path = ""
status_file = ""
project_directory = ""
remote_base_directory = ""
toAddrs = "developers@company.com"
youngest_revision = ""

def email(toAddrs, message, subject, fromAddr='autonote@company.com'):
    headers = "From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n" % (fromAddr, toAddrs, subject)
    message = headers + message
    logging.info('sending email to %s...' % toAddrs)
    server = smtplib.SMTP('smtp.company.com')
    server.set_debuglevel(1)
    server.sendmail(fromAddr, toAddrs, message)
    server.quit()
    logging.info('email sent')

def captureErrorMessage(e):
    sout = StringIO()
    traceback.print_exc(file=sout)
    errorMessage = '\n'+('*'*80)+('\n%s'%e)+('\n%s\n'%sout.getvalue())+('*'*80)
    return errorMessage

def usage_and_exit(errmsg=None):
    """Print a usage message, plus an ERRMSG (if provided), then exit.
    If ERRMSG is provided, the usage message is printed to stderr and
    the script exits with a non-zero error code. Otherwise, the usage
    message goes to stdout, and the script exits with a zero
    errorcode."""
    if errmsg is None:
        stream = sys.stdout
    else:
        stream = sys.stderr
    print(__doc__, file=stream)
    if errmsg:
        print("\nError: %s" % (errmsg), file=stream)
        sys.exit(2)
    sys.exit(0)

def read_args():
    global host
    global user
    global password
    global port
    global repo_path
    global local_repos_path
    global status_file
    global project_directory
    global remote_base_directory
    global youngest_revision
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "?u:p:P:r:a:s:d:SU:SP:",
                                       ["help",
                                        "ftp-user=",
                                        "ftp-password=",
                                        "ftp-port=",
                                        "ftp-remote-dir=",
                                        "access-url=",
                                        "status-file=",
                                        "project-directory=",
                                        "svnUser=",
                                        "svnPass="
                                        ])
    except getopt.GetoptError as msg:
        usage_and_exit(msg)
    for opt, arg in opts:
        if opt in ("-?", "--help"):
            usage_and_exit()
        elif opt in ("-u", "--ftp-user"):
            user = arg
        elif opt in ("-p", "--ftp-password"):
            password = arg
        elif opt in ("-SU", "--svnUser"):
            svnUser = arg
        elif opt in ("-SP", "--svnPass"):
            svnPass = arg
        elif opt in ("-P", "--ftp-port"):
            try:
                port = int(arg)
            except ValueError as msg:
                usage_and_exit("Invalid value '%s' for --ftp-port." % (arg))
            if port < 1 or port > 65535:
                usage_and_exit("Value for --ftp-port must be a positive integer less than 65536.")
        elif opt in ("-r", "--ftp-remote-dir"):
            remote_base_directory = arg
        elif opt in ("-a", "--access-url"):
            repo_path = arg
        elif opt in ("-s", "--status-file"):
            status_file = os.path.abspath(arg)
        elif opt in ("-d", "--project-directory"):
            project_directory = arg
    if len(args) != 3:
        print(str(args))
        usage_and_exit("host and/or local_repos_path not specified (" + str(len(args)) + ")")
    host = args[0]
    print("args1: " + args[1])
    print("args0: " + args[0])
    print("abspath: " + os.path.abspath(args[1]))
    local_repos_path = os.path.abspath(args[1])
    print('project_dir:', project_directory)
    youngest_revision = int(args[2])
    if status_file == "":
        usage_and_exit("No status file specified")

def main():
    global host
    global user
    global password
    global port
    global repo_path
    global local_repos_path
    global status_file
    global project_directory
    global remote_base_directory
    global youngest_revision
    read_args()
    #repository,fs_ptr
    #get youngest revision
    print("local_repos_path: " + local_repos_path)
    print('getting youngest revision...')
    #youngest_revision = fs.youngest_rev(fs_ptr)
    assert youngest_revision, "Unable to lookup youngest revision."
    last_sent_revision = get_last_revision()
    if youngest_revision == last_sent_revision:
        # no need to continue. we should be up to date.
        print('done, up-to-date')
        return
    if last_sent_revision or youngest_revision < 10:
        # Only compare revisions if the DAT file contains a valid
        # revision number. Otherwise we risk waiting forever while
        # we parse and uploading every revision in the repo in the case
        # where a repository is retroactively configured to sync with ftp.
        pysvn_client = pysvn.Client()
        pysvn_client.callback_get_login = get_login
        rev1 = pysvn.Revision(pysvn.opt_revision_kind.number, last_sent_revision)
        rev2 = pysvn.Revision(pysvn.opt_revision_kind.number, youngest_revision)
        summary = pysvn_client.diff_summarize(repo_path, rev1, repo_path, rev2, True, False)
        print('summary len:', len(summary))
        if len(summary) > 0:
            print('connecting to %s...' % host)
            ftp = FTPClient(host, user, password)
            print('connected to %s' % host)
            ftp.base_path = remote_base_directory
            print('set remote base directory to %s' % remote_base_directory)
            # iterate through all the differences between revisions
            for change in summary:
                # determine whether the path of the change is relevant to the path
                # that is being sent, and modify the path as appropriate.
                print('change path:', change.path)
                ftp_relative_path = apply_basedir(change.path)
                print('ftp rel path:', ftp_relative_path)
                # only try to sync path if the path is in our project_directory
                if ftp_relative_path != "":
                    is_file = (change.node_kind == pysvn.node_kind.file)
                    if str(change.summarize_kind) == "delete":
                        print("deleting: " + ftp_relative_path)
                        try:
                            ftp.delete_path("/" + ftp_relative_path, is_file)
                        except ftplib.error_perm as e:
                            if 'cannot find the' in str(e) or 'not found' in str(e):
                                # Log, but otherwise ignore path-not-found errors
                                # when deleting, since it's not a disaster if the file
                                # we want to delete is already gone.
                                logging.error(captureErrorMessage(e))
                            else:
                                raise
                    elif str(change.summarize_kind) == "added" or str(change.summarize_kind) == "modified":
                        local_file = ""
                        if is_file:
                            local_file = svn_export_temp(pysvn_client, repo_path, rev2, change.path)
                        print("uploading file: " + ftp_relative_path)
                        ftp.upload_path("/" + ftp_relative_path, is_file, local_file)
                        if is_file:
                            os.remove(local_file)
                    elif str(change.summarize_kind) == "normal":
                        print("skipping 'normal' element: " + ftp_relative_path)
                    else:
                        raise Exception("Unknown change summarize kind: " + str(change.summarize_kind) + ", path: " + ftp_relative_path)
            ftp.close()
    # write back the last revision that was synced
    print("writing last revision: " + str(youngest_revision))
    set_last_revision(youngest_revision)  # todo: undo

def get_login(a, b, c, d):
    # arguments don't matter, we're always going to return the same thing
    try:
        return True, "svnUsername", "svnPassword", True
    except Exception as e:
        logging.error(e)
        # capture the location of the error
        frame = inspect.currentframe()
        stack_trace = traceback.format_stack(frame)
        logging.debug(stack_trace)
        # end capture
        sys.exit(1)

# functions for persisting the last successfully synced revision
def get_last_revision():
    if os.path.isfile(status_file):
        f = open(status_file, 'r')
        line = f.readline()
        f.close()
        try:
            i = int(line)
        except ValueError:
            i = 0
    else:
        i = 0
        f = open(status_file, 'w')
        f.write(str(i))
        f.close()
    return i

def set_last_revision(rev):
    f = open(status_file, 'w')
    f.write(str(rev))
    f.close()

# augmented ftp client class that can work off a base directory
class FTPClient(ftplib.FTP):
    def __init__(self, host, username, password):
        self.base_path = ""
        self.current_path = ""
        ftplib.FTP.__init__(self, host, username, password)

    def cwd(self, path):
        debug_path = path
        if self.current_path == "":
            self.current_path = self.pwd()
            print("pwd: " + self.current_path)
        if not os.path.isabs(path):
            debug_path = self.base_path + "<" + path
            path = os.path.join(self.current_path, path)
        elif self.base_path != "":
            debug_path = self.base_path + ">" + path.lstrip("/")
            path = os.path.join(self.base_path, path.lstrip("/"))
        path = os.path.normpath(path)
        # by this point the path should be absolute.
        if path != self.current_path:
            print("change from " + self.current_path + " to " + debug_path)
            ftplib.FTP.cwd(self, path)
            self.current_path = path
        else:
            print("staying put : " + self.current_path)

    def cd_or_create(self, path):
        assert os.path.isabs(path), "absolute path expected (" + path + ")"
        try:
            self.cwd(path)
        except ftplib.error_perm as e:
            for folder in path.split('/'):
                if folder == "":
                    self.cwd("/")
                    continue
                try:
                    self.cwd(folder)
                except:
                    print("mkd: (" + path + "):" + folder)
                    self.mkd(folder)
                    self.cwd(folder)

    def upload_path(self, path, is_file, local_path):
        if is_file:
            (path, filename) = os.path.split(path)
            self.cd_or_create(path)
            # Use read-binary to avoid universal newline support from stripping CR characters.
            f = open(local_path, 'rb')
            self.storbinary("STOR " + filename, f)
            f.close()
        else:
            self.cd_or_create(path)

    def delete_path(self, path, is_file):
        (path, filename) = os.path.split(path)
        print("trying to delete: " + path + ", " + filename)
        self.cwd(path)
        try:
            if is_file:
                self.delete(filename)
            else:
                self.delete_path_recursive(filename)
        except ftplib.error_perm as e:
            if 'The system cannot find the' in str(e) or '550 File not found' in str(e):
                # Log, but otherwise ignore path-not-found errors
                # when deleting, since it's not a disaster if the file
                # we want to delete is already gone.
                logging.error(captureErrorMessage(e))
            else:
                raise

    def delete_path_recursive(self, path):
        if path == "/":
            raise Exception("WARNING: trying to delete '/'!")
        for node in self.nlst(path):
            if node == path:
                # it's a file. delete and return
                self.delete(path)
                return
            if node != "." and node != "..":
                self.delete_path_recursive(os.path.join(path, node))
        try:
            self.rmd(path)
        except ftplib.error_perm as msg:
            sys.stderr.write("Error deleting directory " + os.path.join(self.current_path, path) + " : " + str(msg))

# apply the project_directory setting
def apply_basedir(path):
    # remove any leading stuff (in this case, "trunk/") and decide whether file should be propagated
    if not path.startswith(project_directory):
        return ""
    return path.replace(project_directory, "", 1)

def svn_export_temp(pysvn_client, base_path, rev, path):
    # Causes access denied error. Couldn't deduce Windows-perm issue.
    # It's possible Python isn't garbage-collecting the open file-handle in time for pysvn to re-open it.
    # Regardless, just generating a simple filename seems to work.
    #(fd, dest_path) = tempfile.mkstemp()
    dest_path = tmpName = '%s.tmp' % __file__
    exportPath = os.path.join(base_path, path).replace('\\', '/')
    print('exporting %s to %s' % (exportPath, dest_path))
    pysvn_client.export(exportPath,
                        dest_path,
                        force=False,
                        revision=rev,
                        native_eol=None,
                        ignore_externals=False,
                        recurse=True,
                        peg_revision=rev)
    return dest_path

if __name__ == "__main__":
    logging.info('svnftp.start')
    try:
        main()
        logging.info('svnftp.done')
    except Exception as e:
        # capture the location of the error for debug purposes
        frame = inspect.currentframe()
        stack_trace = traceback.format_stack(frame)
        logging.debug(stack_trace[:-1])
        print(stack_trace)
        # end capture
        error_text = '\nFATAL EXCEPTION!!!\n' + captureErrorMessage(e)
        subject = "ALERT: SVN2FTP Error"
        message = """An Error occurred while trying to FTP an SVN commit.
repo_path = %(repo_path)s\n
local_repos_path = %(local_repos_path)s\n
project_directory = %(project_directory)s\n
remote_base_directory = %(remote_base_directory)s\n
error_text = %(error_text)s
""" % globals()
        email(toAddrs, message, subject)
        logging.error(e)
Notes/Disclaimers:
I have basically no python training so I'm learning as I go and spending lots of time reading docs to figure stuff out.
The body of get_login is in a try block because I was getting strange errors saying there was an unhandled exception in callback_get_login. Never figured out why, but it seems fine now. Let sleeping dogs lie, right?
The username and password for get_login are currently hard-coded (but correct) just to eliminate variables and try to change as little as possible at once. (I added the svnuser and svnpass arguments to the existing argument parsing.)
So that's where I am. I can't figure out why on earth it's not printing anything into svn2ftp.out.log. If you're wondering, the output for one of these failed attempts in svn2ftp.debug.log is:
2012-09-06 15:18:12,496 INFO svnftp.start
2012-09-06 15:18:12,496 INFO svnftp.done
And it's no different on a successful run. So there's nothing useful being logged.
I'm lost. I've gone way down the rabbit hole on this one, and don't know where to go from here. Any ideas?
It looks as if you are overwriting your logging level. Try setting both to DEBUG and see what happens.
import sys, os
import logging
logging.basicConfig(
    level=logging.DEBUG,  # DEBUG here
    format='%(asctime)s %(levelname)s %(message)s',
    filename='svn2ftp.debug.log',
    filemode='a'
)
console = logging.StreamHandler()
console.setLevel(logging.ERROR)  # ERROR here
logging.getLogger('').addHandler(console)
Additionally, you are printing in some places and logging in others. The logging library does not redirect sys.stdout to the logging handlers, so anything you print goes only to stdout. I would convert all print statements to logging statements to be consistent.
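For example (a sketch reusing the script's own log file), a print such as the one in main() becomes:
import logging
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    filename='svn2ftp.debug.log',
                    filemode='a')

ftp_relative_path = "some/path"  # placeholder for the value being reported
# instead of: print("uploading file: " + ftp_relative_path)
logging.debug("uploading file: %s", ftp_relative_path)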

Edit ini file option values with ConfigParser (Python)

Anyone know how I'd go about editing ini file values, preferably using ConfigParser? (Or even a place to start from would be great!) I've got lots of comments throughout my config file, so I'd like to keep them by just editing the values, not taking the values and playing around with multiple files.
Structure of my config file:
[name1]
URL = http://example.com
username = dog
password = password
[name2]
URL = http://catlover.com
username = cat
password = adffa
As you can see, I've got the same options for different section names, so editing just the values for one section is a bit trickier if ConfigParser can't do it.
Thanks in advance.
Here is an example
import sys
import os.path
from ConfigParser import RawConfigParser as ConfParser
from ConfigParser import Error

p = ConfParser()
# this happened to me: save the file as ASCII
o = open("config.ini")
if o.read().startswith("\xef\xbb\xbf"):
    print "Fatal Error; Please save the file as ASCII not unicode."
    sys.exit()
o.close()

try:
    results = p.read("config.ini")
except Error as msg:
    print "Error Parsing File"
    print msg
else:
    if results == []:
        print "Could not load config.ini."
        if not os.path.exists("config.ini"):
            print "config.ini does not exist."
        else:
            print "An unknown error occurred."
    else:
        print "Config Details"
        sections = p.sections()
        sections.sort()
        for s in sections:
            print "------------------------"
            print s
            if p.has_option(s, "URL"):
                print "URL: ",
                print p.get(s, "URL")
            else:
                print "URL: No Entry"
            if p.has_option(s, "username"):
                print "User: ",
                print p.get(s, "username")
            else:
                print "User: N/A"
            if p.has_option(s, "password"):
                print "Password: ",
                print p.get(s, "password")
            else:
                print "Password: N/A"
Also, I created this class to store my app's variables and make config writing easier. It was originally used with Twisted, but I created a simple replacement logger:
import os.path
import sys
#from twisted.python import log
import ConfigParser
from traceback import print_last

class Log(object):
    def msg(self, t):
        print "Logger: %s " % t
    def err(self, t=None):
        print "-------------Error-----------"
        print "\n\n"
        if t is None:
            print_last()

# sloppy replacement for twisted's logging functions
log = Log()

class Settings(object):
    '''Stores settings'''
    config_variables = ['variables_that_should_be_stored_in_config']

    def __init__(self, main_folder=None, log_file=None, music_folder=None):
        # load the defaults, then see if there are updated ones in the config
        self.load_defaults()
        self.config = ConfigParser.RawConfigParser()
        if len(self.config.read(self.settings_file)) == 1:
            if 'Settings' in self.config.sections():
                try:
                    self.music_folder = self.config.get('Settings', 'music_folder')
                except ConfigParser.NoOptionError:
                    pass
                log.msg('Music Folder: %s' % self.music_folder)
                try:
                    self.mplayer = self.config.get('Settings', 'mplayer')
                except ConfigParser.NoOptionError:
                    pass
                try:
                    self.eula = self.config.getboolean('Settings', 'eula')
                except ConfigParser.NoOptionError:
                    pass
            else:
                log.msg('No Settings Section; Defaults Loaded')
        else:
            log.msg('Settings at default')

    def load_defaults(self):
        log.msg('Loading Defaults')
        self.main_folder = os.path.dirname(os.path.abspath(sys.argv[0]))
        self.settings_file = os.path.join(self.main_folder, 'settings.cfg')
        self.log_file = os.path.join(self.main_folder, 'grooveshark.log')
        self.music_folder = os.path.join(self.main_folder, 'Music')
        self.grooveshark_started = False
        self.eula = False
        self.download_percent = 0.5  # default buffer percent is 50 %
        if sys.platform == 'win32' or sys.platform == 'cygwin':  # Windows
            if os.path.exists(os.path.join(self.main_folder, 'mplayer', 'mplayer.exe')):
                self.mplayer = os.path.join(self.main_folder, 'mplayer', 'mplayer.exe')
            elif os.path.exists(os.path.join(self.main_folder, '/mplayer.exe')):
                self.mplayer = os.path.join(self.main_folder, '/mplayer.exe')
            else:
                self.mplayer = 'download'
        elif sys.platform == 'darwin':  # Mac
            if os.path.exists(os.path.join(self.main_folder, 'mplayer/mplayer.app')):
                self.mplayer = os.path.join(self.main_folder, 'mplayer/mplayer.app')
            elif os.path.exists(os.path.join(self.main_folder, '/mplayer.app')):
                self.mplayer = os.path.join(self.main_folder, '/mplayer.app')
            else:
                self.mplayer = 'download'
        else:  # linux
            # download or navigate to it
            self.mplayer = 'download'
        # Create Music Folder if it does not exist
        if not os.path.exists(self.music_folder):
            os.makedirs(self.music_folder)
        # Create log file if it does not exist
        if not os.path.exists(self.log_file):
            l = open(self.log_file, 'wb')
            l.close()
        log.msg('Application Folder: %s' % self.main_folder)
        log.msg('Log File: %s' % self.log_file)
        log.msg('Music Folder: %s' % self.music_folder)

    def __setattr__(self, variable, value):
        log.msg('Setting %s to %s' % (variable, value))
        object.__setattr__(self, variable, value)
        if variable in self.config_variables:
            try:
                self.config.set('Settings', variable, value)
            except:
                # Means config wasn't created yet; we could have been trying to set
                # self.config itself (in which case it wasn't set yet because we were
                # in the middle of setting it)
                log.err()
            else:
                # UPDATE settings file
                log.msg('Saving Settings to %s' % (self.settings_file))
                try:
                    self.config.write(open(self.settings_file, 'wb'))
                except:
                    log.err()
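A quick usage sketch (hypothetical; it assumes 'music_folder' has been added to the config_variables list so that __setattr__ persists it):
settings = Settings()
settings.music_folder = '/tmp/Music'  # logged by __setattr__ and saved to settings.cfg
print settings.mplayer                # resolved by load_defaults() or read from config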
