Stopping the pipette with user input (opentrons) - python

I'm working on a biotech research project where a robot is doing the dilution, and I'm having trouble configuring the code so that the pipette stops at a specific column. Ideally, we want the code to ask the user which column the pipette should stop at.
More info on the API: https://protocols.opentrons.com/protocol/customizable_serial_dilution_ot2
def get_values(*names):
    import json
    _all_values = json.loads("""{"pipette_type":"p300_single_gen2","mount_side":"right","tip_type":"standard","trough_type":"nest_12_reservoir_15ml","plate_type":"nest_96_wellplate_200ul_flat","dilution_factor":3,"num_of_dilutions":10,"total_mixing_volume":150,"blank_on":true,"tip_use_strategy":"never","air_gap_volume":10}""")
    return [_all_values[n] for n in names]


"""DETAILS."""

metadata = {
    'protocolName': 'Customizable Serial Dilution',
    'author': 'Opentrons <protocols@opentrons.com>',
    'source': 'Protocol Library',
    'apiLevel': '2.11'
}
def run(protocol_context):
    """PROTOCOL BODY."""
    [pipette_type, mount_side, tip_type, trough_type, plate_type,
     dilution_factor, num_of_dilutions, total_mixing_volume,
     blank_on, tip_use_strategy, air_gap_volume] = get_values(  # noqa: F821
        'pipette_type', 'mount_side', 'tip_type', 'trough_type',
        'plate_type', 'dilution_factor', 'num_of_dilutions',
        'total_mixing_volume', 'blank_on',
        'tip_use_strategy', 'air_gap_volume'
    )

    # check for bad setup here
    if not 1 <= num_of_dilutions <= 11:
        raise Exception('Enter a number of dilutions between 1 and 11')

    if num_of_dilutions == 11 and blank_on == 1:
        raise Exception(
            'No room for blank with 11 dilutions')

    pip_range = pipette_type.split('_')[0].lower()
    tiprack_map = {
        'p10': {
            'standard': 'opentrons_96_tiprack_10ul',
            'filter': 'opentrons_96_filtertiprack_20ul'
        },
        'p20': {
            'standard': 'opentrons_96_tiprack_20ul',
            'filter': 'opentrons_96_filtertiprack_20ul'
        },
        'p50': {
            'standard': 'opentrons_96_tiprack_300ul',
            'filter': 'opentrons_96_filtertiprack_200ul'
        },
        'p300': {
            'standard': 'opentrons_96_tiprack_300ul',
            'filter': 'opentrons_96_filtertiprack_200ul'
        },
        'p1000': {
            'standard': 'opentrons_96_tiprack_1000ul',
            'filter': 'opentrons_96_filtertiprack_1000ul'
        }
    }
    # labware
    trough = protocol_context.load_labware(
        trough_type, '2')
    plate = protocol_context.load_labware(
        plate_type, '3')
    tip_name = tiprack_map[pip_range][tip_type]
    tipracks = [
        protocol_context.load_labware(tip_name, slot)
        for slot in ['1', '4']
    ]

    print(mount_side)

    # pipette
    pipette = protocol_context.load_instrument(
        pipette_type, mount_side, tipracks)

    # reagents
    diluent = trough.wells()[0]

    transfer_volume = total_mixing_volume/dilution_factor
    diluent_volume = total_mixing_volume - transfer_volume

    if 'multi' in pipette_type:
        dilution_destination_sets = [
            [row] for row in plate.rows()[0][1:num_of_dilutions]]
        dilution_source_sets = [
            [row] for row in plate.rows()[0][:num_of_dilutions-1]]
        blank_set = [plate.rows()[0][num_of_dilutions+1]]
    else:
        dilution_destination_sets = plate.columns()[1:num_of_dilutions]
        dilution_source_sets = plate.columns()[:num_of_dilutions-1]
        blank_set = plate.columns()[num_of_dilutions+1]

    all_diluent_destinations = [
        well for set in dilution_destination_sets for well in set]
    pipette.pick_up_tip()
    for dest in all_diluent_destinations:
        # Distribute diluent across the plate to the number of samples
        # and add diluent to one column after the number of samples for a blank
        pipette.transfer(
            diluent_volume,
            diluent,
            dest,
            air_gap=air_gap_volume,
            new_tip='never')
    pipette.drop_tip()
    # Dilution of samples across the 96-well flat bottom plate
    if tip_use_strategy == 'never':
        pipette.pick_up_tip()
    for source_set, dest_set in zip(dilution_source_sets,
                                    dilution_destination_sets):
        for s, d in zip(source_set, dest_set):
            pipette.transfer(
                transfer_volume,
                s,
                d,
                air_gap=air_gap_volume,
                mix_after=(5, total_mixing_volume/2),
                new_tip=tip_use_strategy)
    if tip_use_strategy == 'never':
        pipette.drop_tip()

    if blank_on:
        pipette.pick_up_tip()
        for blank_well in blank_set:
            pipette.transfer(
                diluent_volume,
                diluent,
                blank_well,
                air_gap=air_gap_volume,
                new_tip='never')
        pipette.drop_tip()
Any help is very much appreciated. Thank you!
Currently the robot just goes through all the columns, but we want to find a way to have it stop at a specific column.
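One possible approach, sketched below rather than taken from the protocol itself: the dilution loop above already stops after num_of_dilutions columns, so the stopping column can be exposed the same way the other user choices are, as an extra field in the JSON that get_values() reads (interactive input() is generally not available while a protocol runs on the robot, which is why the protocol uses this parameter mechanism). The stop_column field name and bounds below are assumptions, not part of the original protocol:
# Hypothetical extra field added to the get_values() JSON, e.g. "stop_column": 6
[stop_column] = get_values('stop_column')  # noqa: F821

if not 1 <= stop_column <= 12:
    raise Exception('Enter a stop column between 1 and 12')

# Single-channel case from the protocol: only build source/destination sets up
# to the chosen column, so the pipette never moves past it.
dilution_destination_sets = plate.columns()[1:stop_column]
dilution_source_sets = plate.columns()[:stop_column - 1]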

Related

(Pymongo) connect/adding a relationship between two columns from different collections in MongoDB

I'm practicing using Python to do commit/rollback with MongoDB (PyMongo).
For example, I have two collections: one collection stores the total amount of money of each user, and a log collection records money paid into or withdrawn from the bank.
total_money_collection {"user": "Joy" , "total_money" : 100, "ID" : 999}
log_money_collection {"user": "Joy" , "in_out_put_money" : null , "ID" : 999}
I could use a hand finding a simpler way; maybe there is a short PyMongo command, or MongoDB can do such an operation directly so that I can just extract the result.
If I insert {"user": "Joy" , "in_out_put_money" : -7 , "ID" : 999} into log_money_collection,
how can the "in_out_put_money" field affect the "total_money" field?
Expected output:
total_money_collection {"user": "Joy" , "total_money" : 93, "ID" : 999}
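For the "short command" part of the question, MongoDB's $inc update operator can apply the logged delta to the stored total in a single atomic update; a minimal sketch using the field names from the example above (total_money_collection is assumed to be a PyMongo Collection object):
# apply the logged amount (e.g. -7) directly to total_money in one update
delta = -7  # value taken from the log document's "in_out_put_money"
total_money_collection.update_one(
    {"ID": 999},                        # match the user's balance document
    {"$inc": {"total_money": delta}}    # 100 + (-7) -> 93
)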
This is my code (I made a lot of unnecessary moves; I believe there is a simpler way):
import pymongo
import datetime
import json
from bson.objectid import ObjectId
from bson import json_util
import re

def init_db(ip, db, coll):
    myclient = pymongo.MongoClient('mongodb://' + ip + '/')
    mydb = myclient[db]
    mycol = mydb[coll]
    return mydb, mycol, myclient

def Quy_data( mycol , find_values_json , one_or_many_bool):
    try:
        if one_or_many_bool:
            x = []
            for y in mycol.find(find_values_json):
                x.append(y)
            cash_all_value = mycol.find({},{ "Cash_$": 1 })
        else:
            x = mycol.find_one(find_values_json)
            cash_all_value = mycol.find({},{ "Cash_$": 1 })
        return x , cash_all_value
    except Exception as e:
        msg_fail_reason = "error in ins_data function"
        return msg_fail_reason

ip_input = input("Enter the ip: ")
exist_DB_name = input("Enter exist DB name: ")

def parse_json(data):
    return json.loads(json_util.dumps(data))

try:
    exist_coll = input("Enter exist collection (ex: 10_20_cash_all , 10_20_cash_log ): ")
    mydb, mycol , myclient = init_db(ip_input, exist_DB_name, exist_coll)
    with myclient.start_session(causal_consistency=True) as session:
        # Open a transaction session
        with session.start_transaction():
            # mycol.insert_one({'Name': ' Gosum '}, session=session)
            if exist_coll == "10_20_cash_all":
                # I set find all ( = find )
                one_or_many_bool = True
                findvalues_str = input("Enter find data conditions: ")
                find_values_json = json.loads(findvalues_str)
                x , cash_all_value = Quy_data( mycol , find_values_json , one_or_many_bool )
                # if someone want data in json
                modified_data_x_json = parse_json(x)
                cash_all_value_json = parse_json(cash_all_value)
                a = str(cash_all_value_json)
                print(modified_data_x_json)
                print(type(modified_data_x_json))
                print("= = = = = ")
                print(a)
                print(type(a))
                b = re.search("'Cash_$': (.*)", a)
                print(b)
except Exception as e:
    # Output exception content
    print(e)
Output (I find the user Joy, and try to extract the number after "total_money", then subtract "in_out_put_money"):
Enter the ip: localhost
Enter exist DB name: (practice_10_14)-0004444
Enter exist collection (ex: 10_20_cash_all , 10_20_cash_log ): 10_20_cash_all
Enter find data conditions: { "name": "Joy" }
[{'_id': {'$oid': '6348d73be94317989175dc2d'}, 'name': 'Joy', 'ID': 999, 'Age': 23, 'time': {'$date': '2022-10-17T09:11:54Z'}, 'total_money': 100}]
<class 'list'>
= = = = =
[{'_id': {'$oid': '6348d73be94317989175dc2d'}, 'total_money': 100}, {'_id': {'$oid': '6348d73be94317989175dc2e'}, 'total_money': 100}, {'_id': {'$oid': '6348d73be94317989175dc2f'}, 'total_money': 100}, {'_id': {'$oid': '6348d73be94317989175dc30'}, 'total_money': 100}, {'_id': {'$oid': '6348d73be94317989175dc31'}, 'total_money': 100}, {'_id': {'$oid': '635112dea1fa85dd0cfe590b'}, 'total_money': 100}]
<class 'str'>
None
A simple commit/rollback step in MongoDB with account money: I went around in a huge circle to get it, and I could use a hand finding a simpler way; maybe there is a short PyMongo command, or MongoDB can do this directly so that I just extract the result.
The same concept produces {"user": "Joy" , "total_money" : 93, "ID" : 999}: "total_money" gets smaller by subtracting the logged amount, and of course you can't do that directly the way plain Python does (4 - 1 and so on).
Here is the code:
import pymongo
import datetime
import json
from bson.objectid import ObjectId
from bson import json_util
import re

myclient = pymongo.MongoClient("mongodb://localhost/")
mydb = myclient["(practice_10_14)-0004444"]

# UD_db_data means update
def UD_db_data(mycol , myquery_json, newvalues_json, one_or_many_bool):
    if one_or_many_bool == True:
        x = mycol.update_many(myquery_json, newvalues_json)
    else:
        x = mycol.update_one(myquery_json, newvalues_json)
    return x

# Quy_data = query find db_data
def Quy_data( mycol , find_values_json , one_or_many_bool):
    try:
        if one_or_many_bool:
            x = []
            for y in mycol.find(find_values_json):
                x.append(y)
        else:
            x = mycol.find_one(find_values_json)
        return x
    except Exception as e:
        msg_fail_reason = "error in ins_data function"
        return msg_fail_reason

mycol_one = mydb["10_20_cash_all"]
mycol_two = mydb["10_20_cash_log"]
mycol_3rd = mydb["10_20_cash_info"]

# already store 100$ in bank
# doc_two = {"ID" : 100998 , "Cash_log$" : 5 } # withdraw 5$ from bank
doc_two = input("Enter ID and log amount$: ")
doc_3rd = input("Enter extra info: ")

doc_two_dic = json.loads(doc_two)
doc_3rd_dic = json.loads(doc_3rd)

# doc_3rd = {"note" : "today is good" }
ID_input = doc_two_dic['ID']
print("ur id is :" + str(ID_input))
doc_one = {"ID" : ID_input}

with myclient.start_session() as s:
    cash_all_result = mycol_one.find_one(doc_one, session=s)

    def cb(s):
        try:
            while True:
                cash_all_result = mycol_one.find_one(doc_one, session=s)
                mycol_two.insert_one(doc_two_dic, session=s)
                # print( 3/0 )
                mycol_3rd.insert_one(doc_3rd_dic, session=s)
                print( "now total is :" + str(cash_all_result['Cash_$']) )
                Cash_total_int = int(cash_all_result['Cash_$'])
                log_int = int(doc_two_dic['Cash_log$'])
                if Cash_total_int < log_int:
                    print("error: withdraw is over ur balance")
                    break
                new_Cash_total = Cash_total_int - log_int
                print("now total is :" + str(new_Cash_total))
                newvalues_json = { "$set" : {"Cash_$" : new_Cash_total } }
                mycol_one.update_one(doc_one , newvalues_json, session=s)
                fail_condition_json = {"ok" : 1 , "fail reason" : "no error "}
                print(fail_condition_json)
                return fail_condition_json
        except Exception as e:
            fail_condition_json = {"ok" : 0 , "fail reason" : "error raise on start_session()"}
            print(fail_condition_json)
            return fail_condition_json

    s.with_transaction(cb)
I believe I do the calculation with the data from 3 collections, and the output is below:
Enter ID and log amount$: {"ID" : 100998 , "Cash_log$" : 23 }
Enter extra info: {"note" : "today is no good" }
ur id is :100998
now total is :72
now total is :49
{'ok': 1, 'fail reason': 'no error '}
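The read/subtract/$set sequence inside the callback could likely be collapsed into one conditional update: a filter that requires a sufficient balance plus a $inc with the negative amount, which is atomic on the document. A sketch under that assumption, reusing the names from the code above:
def cb(s):
    log_int = int(doc_two_dic['Cash_log$'])
    mycol_two.insert_one(doc_two_dic, session=s)
    mycol_3rd.insert_one(doc_3rd_dic, session=s)
    # only match when the balance covers the withdrawal, then decrement it
    result = mycol_one.update_one(
        {"ID": ID_input, "Cash_$": {"$gte": log_int}},
        {"$inc": {"Cash_$": -log_int}},
        session=s,
    )
    if result.modified_count == 0:
        raise ValueError("withdraw is over ur balance")  # aborts the transaction
    return {"ok": 1, "fail reason": "no error "}

s.with_transaction(cb)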

unable to iterate through loop in python

I have a SQL query which retrieves coin names and submits an order for each coin.
However, it only submits an order for one coin and fails to loop through the rest; I'm not sure why that's happening.
import sys
**
import pandas as pd

postgreSQL_select_Query = "SELECT base,quote FROM instrument_static where exchange='ftx'"
cursor.execute(postgreSQL_select_Query)
row = ([y for y in cursor.fetchall()])
for i in row:
    base = i[0]
    quote = i[1]
    portfolioItems = [
        {
            'exchange': 'ftx',
            'base': base,
            'quote': quote,
            'amount': 0.01,
        },
    ]

def init():
    username = us
    password = passwordVal
    initialise(clientId, clientSecret, us, password)

if __name__ == "__main__":
    init()
    result = construct_portfolio_with_params(us, portname, portfolioItems)
    print(result)
You need to initialize portfolioItems prior to the loop, and then you can add to it. Try replacing this snippet of code:
...
row = ([y for y in cursor.fetchall()])
portfolioItems = []
for i in row:
    base = i[0]
    quote = i[1]
    portfolioItems.append(
        {
            'exchange': 'ftx',
            'base': base,
            'quote': quote,
            'amount': 0.01,
        }
    )
...

How to optimize invoice validation

I'm working on a database that handles a lot of data. One invoice can have 7482 different articles, and validating an invoice costs a lot of time: it took 26 minutes to validate one with 7482 articles. I found the method that takes the time to finish: it is "action_move_create" inside "odoo\addons\account\models\account_invoice.py".
@api.multi
def action_move_create(self):
    """ Creates invoice related analytics and financial move lines """
    account_move = self.env['account.move']

    for inv in self:
        if not inv.journal_id.sequence_id:
            raise UserError(_('Please define sequence on the journal related to this invoice.'))
        if not inv.invoice_line_ids.filtered(lambda line: line.account_id):
            raise UserError(_('Please add at least one invoice line.'))
        if inv.move_id:
            continue

        if not inv.date_invoice:
            inv.write({'date_invoice': fields.Date.context_today(self)})
        if not inv.date_due:
            inv.write({'date_due': inv.date_invoice})
        company_currency = inv.company_id.currency_id

        # create move lines (one per invoice line + eventual taxes and analytic lines)
        iml = inv.invoice_line_move_line_get()
        iml += inv.tax_line_move_line_get()

        diff_currency = inv.currency_id != company_currency
        # create one move line for the total and possibly adjust the other lines amount
        total, total_currency, iml = inv.compute_invoice_totals(company_currency, iml)

        name = inv.name or ''
        if inv.payment_term_id:
            totlines = inv.payment_term_id.with_context(currency_id=company_currency.id).compute(total, inv.date_invoice)[0]
            res_amount_currency = total_currency
            for i, t in enumerate(totlines):
                if inv.currency_id != company_currency:
                    amount_currency = company_currency._convert(t[1], inv.currency_id, inv.company_id, inv._get_currency_rate_date() or fields.Date.today())
                else:
                    amount_currency = False

                # last line: add the diff
                res_amount_currency -= amount_currency or 0
                if i + 1 == len(totlines):
                    amount_currency += res_amount_currency

                iml.append({
                    'type': 'dest',
                    'name': name,
                    'price': t[1],
                    'account_id': inv.account_id.id,
                    'date_maturity': t[0],
                    'amount_currency': diff_currency and amount_currency,
                    'currency_id': diff_currency and inv.currency_id.id,
                    'invoice_id': inv.id
                })
        else:
            iml.append({
                'type': 'dest',
                'name': name,
                'price': total,
                'account_id': inv.account_id.id,
                'date_maturity': inv.date_due,
                'amount_currency': diff_currency and total_currency,
                'currency_id': diff_currency and inv.currency_id.id,
                'invoice_id': inv.id
            })
        part = self.env['res.partner']._find_accounting_partner(inv.partner_id)
        line = [(0, 0, self.line_get_convert(l, part.id)) for l in iml]
        line = inv.group_lines(iml, line)
        line = inv.finalize_invoice_move_lines(line)

        date = inv.date or inv.date_invoice
        move_vals = {
            'ref': inv.reference,
            'line_ids': line,
            'journal_id': inv.journal_id.id,
            'date': date,
            'narration': inv.comment,
        }
        move = account_move.create(move_vals)
        # Pass invoice in method post: used if you want to get the same
        # account move reference when creating the same invoice after a cancelled one:
        move.post(invoice = inv)
        # make the invoice point to that move
        vals = {
            'move_id': move.id,
            'date': date,
            'move_name': move.name,
        }
        inv.write(vals)
    return True
Could you suggest some solutions?
We assume that the hardware is sufficient to run Odoo correctly.
I optimized it by using a raw SQL query. I made these changes in the account.invoice model:
The first one is the definition of _mock_create_move_line (called from action_move_create).
def _mock_create_move_line(self, model, values, move):
    bad_names = ["analytic_line_ids", "tax_ids", "analytic_tag_ids"]
    other_fields = [
        "currency_id", "debit", "credit", "balance",
        "debit_cash_basis", "credit_cash_basis", "balance_cash_basis",
        "company_currency_id", "amount_residual",
        "amount_residual_currency", "tax_base_amount", "reconciled",
        "company_id", "counterpart"
    ]
    cr = self.env.cr
    quote = '"{}"'.format
    columns = []
    columns1 = []
    for i, v in enumerate(values):
        v = model._add_missing_default_values(v)
        account_id = self.env['account.account'].browse(v['account_id'])
        # compulsory columns and some stored related columns
        # related fields are not triggered, krrrrr
        v.update({
            'move_id': move.id,
            'date_maturity': move.date,
            'company_id': account_id.company_id.id,
            'date': move.date,
            'journal_id': move.journal_id.id,
            'user_type_id': account_id.user_type_id.id,
            'create_uid': self.env.uid,
            'create_date': fields.Datetime.now()
        })
        ######
        temp_column = []
        for name, val in sorted(v.items()):
            if name in bad_names:
                continue
            field = model._fields[name]
            if field.column_type:
                col_val = field.convert_to_column(val, model, v)
                temp_column.append(col_val)
                if not i:
                    columns1.append((name, field.column_format, col_val))
        columns.append(tuple(temp_column))
    model.check_access_rule('create')
    try:
        query = "INSERT INTO {} ({}) VALUES {} RETURNING id".format(
            quote(model._table),
            ", ".join(quote(name) for name, fmt, val in columns1),
            ", ".join('%s' for fmt in columns),
        )
        cr.execute(query, columns)
        ids = cr.fetchall()
        # clear the model cache to take account of the new insertion
        # if not executed, relationnal field will not be updated
        model.invalidate_cache()
        account_move_line_ids = model.browse(ids)
        account_move_line_ids.modified(other_fields)
        account_move_line_ids.recompute()
        # update parent_path
        account_move_line_ids._parent_store_create()
    except Exception as e:
        _logger.info(e)
        cr.rollback()
    return
The second one is the override of the native method action_move_create. I made some modifications: it calls _mock_create_move_line if there is 'raw_sql' in the context.
@api.multi
def action_move_create(self):
    """ Creates invoice related analytics and financial move lines """
    # TODO : make choice between ORM or raw sql according to the context
    account_move = self.env['account.move']

    for inv in self:
        if not inv.journal_id.sequence_id:
            raise UserError(_('Please define sequence on the journal related to this invoice.'))
        if not inv.invoice_line_ids.filtered(lambda line: line.account_id):
            raise UserError(_('Please add at least one invoice line.'))
        if inv.move_id:
            continue

        if not inv.date_invoice:
            inv.write({'date_invoice': fields.Date.context_today(self)})
        if not inv.date_due:
            inv.write({'date_due': inv.date_invoice})
        company_currency = inv.company_id.currency_id

        # create move lines (one per invoice line + eventual taxes and analytic lines)
        iml = inv.invoice_line_move_line_get()
        iml += inv.tax_line_move_line_get()

        diff_currency = inv.currency_id != company_currency
        # create one move line for the total and possibly adjust the other lines amount
        total, total_currency, iml = inv.compute_invoice_totals(company_currency, iml)

        name = inv.name or ''
        if inv.payment_term_id:
            totlines = \
                inv.payment_term_id.with_context(currency_id=company_currency.id).compute(total, inv.date_invoice)[0]
            res_amount_currency = total_currency
            for i, t in enumerate(totlines):
                if inv.currency_id != company_currency:
                    amount_currency = company_currency._convert(t[1], inv.currency_id, inv.company_id,
                                                                inv._get_currency_rate_date() or fields.Date.today())
                else:
                    amount_currency = False

                # last line: add the diff
                res_amount_currency -= amount_currency or 0
                if i + 1 == len(totlines):
                    amount_currency += res_amount_currency

                iml.append({
                    'type': 'dest',
                    'name': name,
                    'price': t[1],
                    'account_id': inv.account_id.id,
                    'date_maturity': t[0],
                    'amount_currency': diff_currency and amount_currency,
                    'currency_id': diff_currency and inv.currency_id.id,
                    'invoice_id': inv.id
                })
        else:
            iml.append({
                'type': 'dest',
                'name': name,
                'price': total,
                'account_id': inv.account_id.id,
                'date_maturity': inv.date_due,
                'amount_currency': diff_currency and total_currency,
                'currency_id': diff_currency and inv.currency_id.id,
                'invoice_id': inv.id
            })
        part = self.env['res.partner']._find_accounting_partner(inv.partner_id)
        line = [(0, 0, self.line_get_convert(l, part.id)) for l in iml]
        line = inv.group_lines(iml, line)
        line = inv.finalize_invoice_move_lines(line)

        date = inv.date or inv.date_invoice

        if self.env.context.get('raw_sql', None):
            move_vals = {
                'ref': inv.reference,
                'journal_id': inv.journal_id.id,
                'date': date,
                'narration': inv.comment,
            }
            # remove (0, 0, ...)
            # override the group_lines method to avoid looping on next instruction
            new_lines = [nl[2] for nl in line]

            # TODO do not call compute here, add with ...norecompute()
            move = account_move.create(move_vals)
            move.env.cr.commit()
            self._mock_create_move_line(self.env['account.move.line'], new_lines, move)

            # Pass invoice in method post: used if you want to get the same
            # account move reference when creating the same invoice after a cancelled one:
            # compute move, it is not triggered automatically bc raw sql insertion
            # is it correct to call it like this ? find better way
            move._amount_compute()
            move._compute_partner_id()
            move._compute_matched_percentage()
        else:
            # make default behavior
            move_vals = {
                'ref': inv.reference,
                'line_ids': line,
                'journal_id': inv.journal_id.id,
                'date': date,
                'narration': inv.comment,
            }
            move = account_move.create(move_vals)
            move.post(invoice=inv)
        # make the invoice point to that move
        vals = {
            'move_id': move.id,
            'date': date,
            'move_name': move.name,
        }
        inv.write(vals)
    return True
Now the execution time is less than 1 minute for about 7000 records inserted into account.move.line.
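For reference, since the override branches on 'raw_sql' in the context, the fast path would presumably be triggered with something like the following (a hypothetical call from an Odoo shell; invoice_id is a placeholder):
# Hypothetical usage, assuming the override above is installed on account.invoice
invoice = env['account.invoice'].browse(invoice_id)  # invoice_id is a placeholder
invoice.with_context(raw_sql=True).action_move_create()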

Dynamodb isn't finding overlap between two date ranges

I am trying to search my database to see if a date range I am about to add overlaps with a date range that already exists in the database.
Using this question: Determine Whether Two Date Ranges Overlap
I came up with firstDay <= :end and lastDay >= :start for my FilterExpression.
def create(self, start=None, days=30):
    # Create the start/end times
    if start is None:
        start = datetime.utcnow()
    elif isinstance(start, datetime) is False:
        raise ValueError('Start time must either be "None" or a "datetime"')
    end = start + timedelta(days=days)
    # Format the start and end string "YYYYMMDD"
    start = str(start.year) + str('%02d' % start.month) + str('%02d' % start.day)
    end = str(end.year) + str('%02d' % end.month) + str('%02d' % end.day)
    # Search the database for overlap
    days = self.connection.select(
        filter='firstDay <= :end and lastDay >= :start',
        attributes={
            ':start': {'N': start},
            ':end': {'N': end}
        }
    )
    # if we get one or more days then there is overlap
    if len(days) > 0:
        raise ValueError('There looks to be a time overlap')
    # Add the item to the database
    self.connection.insert({
        "firstDay": {"N": start},
        "lastDay": {"N": end}
    })
I am then calling the function like this:
seasons = dynamodb.Seasons()
seasons.create(start=datetime.utcnow() + timedelta(days=50))
As requested, the method looks like this:
def select(self, conditions='', filter='', attributes={}, names={}, limit=1, select='ALL_ATTRIBUTES'):
    """
    Select one or more items from dynamodb
    """
    # Create the condition, it should contain the datatype hash
    conditions = self.hashKey + ' = :hash and ' + conditions if len(conditions) > 0 else self.hashKey + ' = :hash'
    attributes[':hash'] = {"S": self.hashValue}
    limit = max(1, limit)
    args = {
        'TableName': self.table,
        'Select': select,
        'ScanIndexForward': True,
        'Limit': limit,
        'KeyConditionExpression': conditions,
        'ExpressionAttributeValues': attributes
    }
    if len(names) > 0:
        args['ExpressionAttributeNames'] = names
    if len(filter) > 0:
        args['FilterExpression'] = filter
    return self.connection.query(**args)['Items']
When I run the above, it keeps inserting the above start and end date into the database because it isn't finding any overlap. Why is this happening?
The table structure looks like this (JavaScript):
{
TableName: 'test-table',
AttributeDefinitions: [{
AttributeName: 'dataType',
AttributeType: 'S'
}, {
AttributeName: 'created',
AttributeType: 'S'
}],
KeySchema: [{
AttributeName: 'dataType',
KeyType: 'HASH'
}, {
AttributeName: 'created',
KeyType: 'RANGE'
}],
ProvisionedThroughput: {
ReadCapacityUnits: 5,
WriteCapacityUnits: 5
},
}
Looks like you are setting LIMIT=1. You are probably using this to say 'just return the first match found'. In fact setting Limit to 1 means you will only evaluate the first item found in the Query (i.e. in the partition value range). You probably need to remove the limit, so that each item in the partition range is evaluated for an overlap.
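That matches how DynamoDB applies Limit: items are counted before the FilterExpression runs, so with Limit set to 1 only a single item per query is ever checked for overlap. One way to act on the answer, sketched against the select() wrapper from the question (the optional-limit behavior is an assumption, not the poster's actual fix):
def select(self, conditions='', filter='', attributes={}, names={},
           limit=None, select='ALL_ATTRIBUTES'):
    """Query DynamoDB; only apply a Limit when the caller explicitly asks for one."""
    # Create the condition, it should contain the datatype hash
    conditions = (self.hashKey + ' = :hash and ' + conditions
                  if len(conditions) > 0 else self.hashKey + ' = :hash')
    attributes[':hash'] = {"S": self.hashValue}
    args = {
        'TableName': self.table,
        'Select': select,
        'ScanIndexForward': True,
        'KeyConditionExpression': conditions,
        'ExpressionAttributeValues': attributes
    }
    if limit is not None:
        args['Limit'] = max(1, limit)  # cap results only when requested
    if len(names) > 0:
        args['ExpressionAttributeNames'] = names
    if len(filter) > 0:
        args['FilterExpression'] = filter
    return self.connection.query(**args)['Items']
Note that even without a Limit, a single query() call returns at most about 1 MB of data, so a very large partition may still need LastEvaluatedKey pagination before the overlap check is complete.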

Editing a .txt file - Algorithm not working

I want to automatically edit .txt files with code. Everything containing victory_points shall be removed and re-inserted in another form after the "history={" statement. But in the end, it adds an additional history={. Why?
Code:
def überschreiben(filename, vp, capital):
    data_out = open(filename, "r")
    data_in = open(filename + "_output.txt", "w")
    vpsegment = False
    for line in data_out:
        if "\thistory" in line:
            data_in.write(line+'\n\t\tvictory_points = { '+str(capital)+' '+str(vp)+' }\n')
        if "\t\tvictory_points" in line:
            vppivot = line
            vpsegment = True
        if vpsegment == True:
            if "}" in line:
                data_in.write("")
                vpsegment = False
            else:
                data_in.write("")
        else:
            data_in.write(line)
    data_in.close()
    data_out.close()
Input:
state={
	id=1
	name="STATE_1" # Corsica
	manpower = 322900
	state_category = town
	history={
		owner = FRA
		victory_points = { 3838 1 }
		buildings = {
			infrastructure = 4
			industrial_complex = 1
			air_base = 1
			3838 = {
				naval_base = 3
			}
		}
		add_core_of = FRA
	}
	provinces={
		3838 9851 11804
	}
}
Output:
[...]
	state_category = town
	history={
		victory_points = { 00001 8 }
	history={
		owner = FRA
		buildings = {
			infrastructure = 4
			industrial_complex = 1
			air_base = 1
			3838 = {
				naval_base = 3
			}
		}
		add_core_of = FRA
	}
	provinces={
		3838 9851 11804
	}
}
Where does the second history={ come from?
Let's look at what happens when you read the line "\thistory={":
if "\thistory" in line:
    data_in.write(line+'\n\t\tvictory_points = { '+str(capital)+' '+str(vp)+' }\n')
The line contains "\thistory", so this writes the line (that is the first "history={") plus the new victory_points entry.
if "\t\tvictory_points" in line:
    vppivot=line
    vpsegment=True
Nothing happens, because the line does not contain "\t\tvictory_points".
if vpsegment==True:
    if "}" in line:
        data_in.write("")
        vpsegment=False
    else:
        data_in.write("")
else:
    data_in.write(line)
vpsegment is False, so it goes to the else branch and writes the line again, which is the second "\thistory={".
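A minimal way to act on that explanation (a sketch, not the poster's exact code): skip the rest of the loop body once the history line has been handled, so it is only written once:
for line in data_out:
    if "\thistory" in line:
        # write the history line once, followed by the new victory_points entry
        data_in.write(line + '\n\t\tvictory_points = { ' + str(capital) + ' ' + str(vp) + ' }\n')
        continue  # prevents the final else from writing the same line a second time
    if "\t\tvictory_points" in line:
        vpsegment = True
    if vpsegment == True:
        if "}" in line:
            vpsegment = False
        # the old victory_points block is intentionally not written
    else:
        data_in.write(line)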
