I know that multiple databases work in Flask-SQLAlchemy via __bind_key__.
But I don't know how to migrate those databases with Alembic (Flask-Migrate).
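For context, this is roughly the kind of setup I mean (a minimal sketch; the URIs and model names are illustrative):

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///main.db'
app.config['SQLALCHEMY_BINDS'] = {
    'bind_main': 'sqlite:///main.db',
    'bind_follower': 'sqlite:///follower.db',
}
db = SQLAlchemy(app)

class MainRecord(db.Model):
    # stored in the 'bind_main' database
    __bind_key__ = 'bind_main'
    id = db.Column(db.Integer, primary_key=True)

class FollowerRecord(db.Model):
    # stored in the 'bind_follower' database
    __bind_key__ = 'bind_follower'
    id = db.Column(db.Integer, primary_key=True)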
Here's env.py:
from flask import current_app
config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_BINDS')['bind_main'])
target_metadata = {
    'bind_main': current_app.extensions['migrate'].db.metadata,
    'bind_follower': current_app.extensions['migrate'].db.metadata,
}
How can I set the follower database in target_metadata? Flask-Migrate doesn't seem to take the bind databases into account.
Thanks.
To create a multiple database migration repository, add the --multidb argument to the init command:
$ python app.py db init --multidb
For more details, please refer to the Flask-Migrate documentation.
It may be easier to scrap the old "migrations" folder and initialize the migration repository again with the new setup, re-applying any customizations to the fresh directory. Most likely this would mean modifying env.py or script.py.mako.
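For example, a minimal sketch of that workflow (the backup path and migration message are illustrative):
$ mv migrations migrations.bak            # keep the old repository for reference
$ flask db init --multidb                 # create a fresh multi-database repository
$ flask db migrate -m "multidb baseline"  # generate a first revision covering all binds
$ flask db upgrade                        # apply it to every configured database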
Diff init vs init --multidb
I ran a diff of the results for flask db init (i.e. single.migrations) vs flask db init --multidb (i.e. multi.migrations):
diff --suppress-common-lines single.migrations/README multi.migrations/README
1c1
< Single-database configuration for Flask.
---
> Multi-database configuration for Flask.
diff --suppress-common-lines single.migrations/env.py multi.migrations/env.py
5a6
> from sqlalchemy import MetaData
9a11,12
> USE_TWOPHASE = False
>
26a30,42
> bind_names = []
> if current_app.config.get('SQLALCHEMY_BINDS') is not None:
> bind_names = list(current_app.config['SQLALCHEMY_BINDS'].keys())
> else:
> get_bind_names = getattr(current_app.extensions['migrate'].db,
> 'bind_names', None)
> if get_bind_names:
> bind_names = get_bind_names()
> for bind in bind_names:
> context.config.set_section_option(
> bind, "sqlalchemy.url",
> str(current_app.extensions['migrate'].db.get_engine(
> bind=bind).url).replace('%', '%%'))
28a45
>
34a52,62
> def get_metadata(bind):
> """Return the metadata for a bind."""
> if bind == '':
> bind = None
> m = MetaData()
> for t in target_metadata.tables.values():
> if t.info.get('bind_key') == bind:
> t.tometadata(m)
> return m
>
>
47,50c75,76
< url = config.get_main_option("sqlalchemy.url")
< context.configure(
< url=url, target_metadata=target_metadata, literal_binds=True
< )
---
> # for the --sql use case, run migrations for each URL into
> # individual files.
52,53c78,99
< with context.begin_transaction():
< context.run_migrations()
---
> engines = {
> '': {
> 'url': context.config.get_main_option('sqlalchemy.url')
> }
> }
> for name in bind_names:
> engines[name] = rec = {}
> rec['url'] = context.config.get_section_option(name, "sqlalchemy.url")
>
> for name, rec in engines.items():
> logger.info("Migrating database %s" % (name or '<default>'))
> file_ = "%s.sql" % name
> logger.info("Writing output to %s" % file_)
> with open(file_, 'w') as buffer:
> context.configure(
> url=rec['url'],
> output_buffer=buffer,
> target_metadata=get_metadata(name),
> literal_binds=True,
> )
> with context.begin_transaction():
> context.run_migrations(engine_name=name)
70,85c116,169
< if script.upgrade_ops.is_empty():
< directives[:] = []
< logger.info('No changes in schema detected.')
<
< connectable = current_app.extensions['migrate'].db.get_engine()
<
< with connectable.connect() as connection:
< context.configure(
< connection=connection,
< target_metadata=target_metadata,
< process_revision_directives=process_revision_directives,
< **current_app.extensions['migrate'].configure_args
< )
<
< with context.begin_transaction():
< context.run_migrations()
---
> if len(script.upgrade_ops_list) >= len(bind_names) + 1:
> empty = True
> for upgrade_ops in script.upgrade_ops_list:
> if not upgrade_ops.is_empty():
> empty = False
> if empty:
> directives[:] = []
> logger.info('No changes in schema detected.')
>
> # for the direct-to-DB use case, start a transaction on all
> # engines, then run all migrations, then commit all transactions.
> engines = {
> '': {'engine': current_app.extensions['migrate'].db.get_engine()}
> }
> for name in bind_names:
> engines[name] = rec = {}
> rec['engine'] = current_app.extensions['migrate'].db.get_engine(
> bind=name)
>
> for name, rec in engines.items():
> engine = rec['engine']
> rec['connection'] = conn = engine.connect()
>
> if USE_TWOPHASE:
> rec['transaction'] = conn.begin_twophase()
> else:
> rec['transaction'] = conn.begin()
>
> try:
> for name, rec in engines.items():
> logger.info("Migrating database %s" % (name or '<default>'))
> context.configure(
> connection=rec['connection'],
> upgrade_token="%s_upgrades" % name,
> downgrade_token="%s_downgrades" % name,
> target_metadata=get_metadata(name),
> process_revision_directives=process_revision_directives,
> **current_app.extensions['migrate'].configure_args
> )
> context.run_migrations(engine_name=name)
>
> if USE_TWOPHASE:
> for rec in engines.values():
> rec['transaction'].prepare()
>
> for rec in engines.values():
> rec['transaction'].commit()
> except: # noqa: E722
> for rec in engines.values():
> rec['transaction'].rollback()
> raise
> finally:
> for rec in engines.values():
> rec['connection'].close()
diff --suppress-common-lines single.migrations/script.py.mako multi.migrations/script.py.mako
1c1,4
< """${message}
---
> <%!
> import re
>
> %>"""${message}
19,20c22,48
< def upgrade():
< ${upgrades if upgrades else "pass"}
---
> def upgrade(engine_name):
> globals()["upgrade_%s" % engine_name]()
>
>
> def downgrade(engine_name):
> globals()["downgrade_%s" % engine_name]()
>
> <%
> from flask import current_app
> bind_names = []
> if current_app.config.get('SQLALCHEMY_BINDS') is not None:
> bind_names = list(current_app.config['SQLALCHEMY_BINDS'].keys())
> else:
> get_bind_names = getattr(current_app.extensions['migrate'].db, 'bind_names', None)
> if get_bind_names:
> bind_names = get_bind_names()
> db_names = [''] + bind_names
> %>
>
> ## generate an "upgrade_<xyz>() / downgrade_<xyz>()" function
> ## for each database name in the ini file.
>
> % for db_name in db_names:
>
> def upgrade_${db_name}():
> ${context.get("%s_upgrades" % db_name, "pass")}
>
21a50,51
> def downgrade_${db_name}():
> ${context.get("%s_downgrades" % db_name, "pass")}
23,24c53
< def downgrade():
< ${downgrades if downgrades else "pass"}
---
> % endfor
Common subdirectories: single.migrations/versions and multi.migrations/versions
Note: lines prefixed with < are from the single-database setup and lines prefixed with > are from the multi-database setup. Stack Overflow isn't the best medium for displaying a diff; diff -y --color=always (or another diff tool) is much easier to read.
In summary, a few lines were removed from env.py, and new lines were added to env.py and script.py.mako to accommodate the bind keys.
Specify New Default
To start with a new directory and retain the old in the codebase for comparison, specify the directory during the initialization:
flask db init --multidb --directory "multi.migrations"
The new directory can be specified in the program's Migrate constructor:
migrate = Migrate(directory="multi.migrations")
Likewise, in an interactive Python session, a directory argument can be passed to the API calls:
show(directory='migrations', revision='head')
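For example, a minimal sketch of driving the migrations from Python (assuming app is the Flask application with Migrate configured; the helpers need an application context):

from flask_migrate import show, upgrade

with app.app_context():
    show(directory='multi.migrations', revision='head')      # print the head revision(s)
    upgrade(directory='multi.migrations', revision='head')   # apply pending migrations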
Related
I have a task in which I must update a database on another server. As my options are limited, I'm using Python to do the update.
However, I get this error:
pyodbc.ProgrammingError: ('42000', "[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Incorrect syntax near ')'. (102) (SQLExecDirectW)")
My code is this:
First I create a SELECT and then use it in the UPDATE:
query_dwt = "SELECT [cdcliente]\
    ,[nmcontato]\
    ,[cddepartamento]\
    ,[nmcargo]\
    ,[dsemail]\
    ,[cdlingua]\
    ,[nrcpfcnpj]\
    ,[cdcargo]\
    ,[cdcontatosuperior]\
    ,[idativo]\
    ,[cdcidade]\
    ,[dsendereco]\
    ,[dscomplemento]\
    ,[nmbairro]\
    ,[nrcep]\
    ,[nrcelular]\
    ,[dtnascimento]\
    ,[idbloqueado]\
    ,[cdlocalidade]\
    ,[nrmatricula]\
    ,[nmskin]\
    FROM [dw].[d_Qualitor_ad_contato_RH] WITH (NOLOCK)\
    WHERE cdcliente = 9402\
    AND (cdcontato = 38584 OR cdcontato = 22320 OR cdcontato = 37284);"
Second, I use the SELECT created above to bring the information from the source table into the UPDATE of the target table:
query_qltr = """UPDATE ad\
    SET\
    ad.nmcontato = PR.nmcontato\
    ,ad.cddepartamento = PR.cddepartamento\
    ,ad.nmcargo = PR.nmcargo\
    ,ad.dsemail = PR.dsemail\
    ,ad.cdlingua = PR.cdlingua\
    ,ad.nrcpfcnpj = PR.nrcpfcnpj\
    ,ad.cdcargo = PR.cdcargo\
    ,ad.cdcontatosuperior = PR.cdcontatosuperior\
    ,ad.idativo = PR.idativo\
    ,ad.cdcidade = PR.cdcidade\
    ,ad.dsendereco = PR.dsendereco\
    ,ad.dscomplemento = PR.dscomplemento\
    ,ad.nmbairro = PR.nmbairro\
    ,ad.nrcep = PR.nrcep\
    ,ad.nrcelular = PR.nrcelular\
    ,ad.dtnascimento = PR.dtnascimento\
    ,ad.idbloqueado = PR.idbloqueado\
    ,ad.cdlocalidade = PR.cdlocalidade\
    ,ad.nrmatricula = PR.nrmatricula\
    ,ad.nmskin = PR.nmskin\
    FROM dbo.ad_contato ad\
    INNER JOIN ({}) PR\
    ON ad.cdcontato = PR.cdcontato\
    AND ad.cdcliente LIKE '9402';""".format(OpenSqlDatabaseConnection.execute_query(query_dwt,'target-db-conn-str'))

OpenSqlDatabaseConnection.execute_query(query_qltr,'rdn-db-clt-sql-06a-inssql01-qualitor-prd-jdbc-conn-string-01')
I'm sure it's something simple but I can't figure it out.
Solution:
The original UPDATE most likely fails because format() interpolates the Python return value of execute_query (not SQL text or a result set) into the query, which leaves an invalid derived table inside the parentheses of the JOIN. Instead, fetch the data first and update row by row:
1 - Extract the data from the first database and load it into a DataFrame.
def select_dw_qualitor_ad_contato():
    query_dwt = "SELECT [cdcliente]\
        ,[cdcontato]\
        ,[nmcontato]\
        ,[cddepartamento]\
        ,[nmcargo]\
        ,[dsemail]\
        ,[cdlingua]\
        ,[nrcpfcnpj]\
        ,[cdcargo]\
        ,[cdcontatosuperior]\
        ,[idativo]\
        ,[cdcidade]\
        ,[dsendereco]\
        ,[dscomplemento]\
        ,[nmbairro]\
        ,[nrcep]\
        ,[nrcelular]\
        ,[dtnascimento]\
        ,[idbloqueado]\
        ,[cdlocalidade]\
        ,[nrmatricula]\
        ,[nmskin]\
        FROM [dw].[d_Qualitor_ad_contato_RH] WITH (NOLOCK)\
        WHERE cdcliente = 9402\
        AND (cdcontato = 38584\
        OR cdcontato = 22320\
        OR cdcontato = 37284\
        OR cdcontato = 36139\
        OR cdcontato = 41035\
        OR cdcontato = 38819);"
    return pd.read_sql(query_dwt, OpenSqlDatabaseConnection.connection('target-db-conn-str'),
                       parse_dates={"date_column": {"errors": "ignore"}})
2 - Update the target table row by row with the DataFrame.
def update_qualitor_table():
    dfdw = QueriesLists.select_dw_qualitor_ad_contato()
    # iterate over the rows by position; enumerate(dfdw) would walk the
    # column labels, not the rows
    for i in range(len(dfdw)):
        df = dfdw.iloc[i]
        QueriesLists.update_database(df)
3 - Build the SQL UPDATE command from each DataFrame row.
def update_database(df):
    query_qltr = "UPDATE [dbo].[ad_contato]\
        SET [nmcontato] = CAST('{0}' AS VARCHAR(200))\
        ,[cddepartamento] = CAST('{1}' AS INT)\
        ,[nmcargo] = CAST('{2}' AS VARCHAR (50))\
        ,[dsemail] = CAST('{3}' AS VARCHAR(200))\
        ,[cdlingua] = CAST('{4}' AS INT)\
        ,[nrcpfcnpj] = CAST('{5}' AS VARCHAR(20))\
        ,[cdcargo] = CAST('{6}' AS INT)\
        ,[cdcontatosuperior] = CAST('{7}' AS INT)\
        ,[idativo] = CAST('{8}' AS VARCHAR(1))\
        ,[dsendereco] = CAST('{9}' AS VARCHAR(200))\
        ,[dscomplemento] = CAST('{10}' AS VARCHAR(200))\
        ,[nmbairro] = CAST('{11}' AS VARCHAR(40))\
        ,[nrcep] = CAST('{12}' AS VARCHAR(9))\
        ,[dtnascimento] = CAST('{13}' AS DATETIME)\
        ,[idbloqueado] = CAST('{14}' AS VARCHAR(1))\
        ,[cdlocalidade] = CAST('{15}' AS INT)\
        ,[nrmatricula] = CAST('{16}' AS VARCHAR(20))\
        WHERE [cdcontato] = CAST('{17}' AS INT)\
        AND [cdcliente] = 9402;\
        ".format(str(df[2])              # nmcontato
                 ,int(df[3])             # cddepartamento
                 ,str(df[4])             # nmcargo
                 ,str(df[5])             # dsemail
                 ,int(df[6])             # cdlingua
                 ,str(df[7])             # nrcpfcnpj
                 ,int(df[8])             # cdcargo
                 ,int(df[9])             # cdcontatosuperior
                 ,str(df[10])            # idativo
                 ,str(df[12])            # dsendereco
                 ,str(df[13])            # dscomplemento
                 ,str(df[14])            # nmbairro
                 ,str(df[15])            # nrcep
                 ,pd.to_datetime(df[17]) # dtnascimento
                 ,str(df[18])            # idbloqueado
                 ,int(df[19])            # cdlocalidade
                 ,str(df[20])            # nrmatricula
                 ,int(df[1])             # cdcontato
                 )
    OpenSqlDatabaseConnection.execute_query(query_qltr, 'rdn-db-clt-sql-06a-inssql01-qualitor-prd-jdbc-conn-string-01')
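As a side note, here is a hedged sketch of the same per-row update using pyodbc parameter markers instead of string formatting; connection is assumed to be a raw pyodbc connection obtained from your helper class, and only a few of the columns are shown:

def update_database_parameterized(connection, row):
    # '?' placeholders let the ODBC driver handle quoting and type conversion
    query = ("UPDATE [dbo].[ad_contato] "
             "SET [nmcontato] = ?, [cddepartamento] = ?, [dsemail] = ? "
             "WHERE [cdcontato] = ? AND [cdcliente] = 9402;")
    cursor = connection.cursor()
    cursor.execute(query, (str(row['nmcontato']), int(row['cddepartamento']),
                           str(row['dsemail']), int(row['cdcontato'])))
    connection.commit()
    cursor.close()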
I have a lot of Excel files I am trying to convert to Python code and need some help :)
I have a data frame like this:
Date STD-3 STD-25 STD-2 STD-15 STD-1 Data STD1 STD15 STD2 STD25 STD3
11.05.2022 -0,057406797 -0,047838998 -0,038271198 -0,028703399 -0,019135599 0,021233631 0,019135599 0,028703399 0,038271198 0,047838998 0,057406797
I need to check for this logic:
"Data" < "STD1" and "Data" > "STD-1" = 0
"Data" > "STD1" and "Data" < "STD15" = 1
"Data" > "STD15" and "Data" < "STD2" = 1,5
"Data" > "STD2" and "Data" < "STD25" = 2
"Data" > "STD25" and "Data" < "STD3" = 2,5
"Data" > "STD3" = 3
"Data" < "STD-1" and "Data" > "STD-15" = -1
"Data" < "STD-15" and "Data" > "STD-2" = -1,5
"Data" < "STD-2" and "Data" > "STD-25" = -2
"Data" < "STD-25" and "Data" > "STD-3" = -2,5
"Data" < "STD-3" = -3
And add the output to a new column.
import numpy as np

condition = [
    (df['Data'] < df['STD1']) & (df['Data'] > df['STD-1']),
    (df['Data'] > df['STD1']) & (df['Data'] < df['STD15']),
    (df['Data'] > df['STD15']) & (df['Data'] < df['STD2']),
    (df['Data'] > df['STD2']) & (df['Data'] < df['STD25']),
    (df['Data'] > df['STD25']) & (df['Data'] < df['STD3']),
    df['Data'] > df['STD3'],
    (df['Data'] < df['STD-1']) & (df['Data'] > df['STD-15']),
    (df['Data'] < df['STD-15']) & (df['Data'] > df['STD-2']),
    (df['Data'] < df['STD-2']) & (df['Data'] > df['STD-25']),
    (df['Data'] < df['STD-25']) & (df['Data'] > df['STD-3']),
    df['Data'] < df['STD-3'],
]
result = [0, 1, 1.5, 2, 2.5, 3, -1, -1.5, -2, -2.5, -3]
df['RESULT'] = np.select(condition, result, None)
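For the sample row above, Data (≈0.021) is greater than STD1 (≈0.019) and smaller than STD15 (≈0.029), so the second condition matches and RESULT comes out as 1.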
After about two years of hobby scripting in Python, I'm currently trying to optimize my old code.
In the past I had a script that handed out resources to players. The whole calculation for 100,000 villages took about 25 minutes. This has been shortened to a mere 1 minute and 40 seconds with a filter: if resources are already equal to maximum storage, the row is not fetched from the table.
There are still checks in place in case one of the 3 resource types is full (so the other resources still receive their production bonus).
Production is divided by 4 since the job runs once every 15 minutes.
The old code had a 25-minute run time.
The code shown below runs over 100,000 "villages" within 1 minute and 40 seconds:
2.3 seconds to read all the data
0.2 seconds to write all the data to the database
the rest (1 minute and 37.5 seconds) is spent purely in the loop between
for village in villages: and session.add(add_resources)
Is it possible to speed this up even further, or are these the limits of Python itself?
time_start, villages = datetime.utcnow(), new_get_villages()
for village in villages:
    production_wood, production_stone, production_iron = int(village.wood_production/4), int(village.stone_production/4), int(village.iron_production/4)
    add_resources = session.query(VillageNew).filter(VillageNew.pk == village.pk).first()
    if add_resources.wood_stock != add_resources.max_storage:
        add_resources.wood_stock = add_resources.wood_stock + production_wood
        if add_resources.wood_stock > add_resources.max_storage:
            add_resources.wood_stock = add_resources.max_storage
    if add_resources.stone_stock != add_resources.max_storage:
        add_resources.stone_stock = add_resources.stone_stock + production_stone
        if add_resources.stone_stock > add_resources.max_storage:
            add_resources.stone_stock = add_resources.max_storage
    if add_resources.iron_stock != add_resources.max_storage:
        add_resources.iron_stock = add_resources.iron_stock + production_iron
        if add_resources.iron_stock > add_resources.max_storage:
            add_resources.iron_stock = add_resources.max_storage
    session.add(add_resources)
session.commit()
time_end = datetime.utcnow()
The session is created as follows:
db_connection_string = conf.get_string('DBConf', 'db_connection_string')
engine = create_engine(db_connection_string, encoding='utf8')
Session = sessionmaker(bind=engine)
Code update after gimix's answer:
for village in session.query(VillageNew).filter(
        or_(VillageNew.wood_stock != VillageNew.max_storage,
            VillageNew.stone_stock != VillageNew.max_storage,
            VillageNew.iron_stock != VillageNew.max_storage)).all():
    village.wood_stock = village.wood_stock + int(village.wood_production)
    if village.wood_stock > village.max_storage:
        village.wood_stock = village.max_storage
    village.stone_stock = village.stone_stock + int(village.stone_production)
    if village.stone_stock > village.max_storage:
        village.stone_stock = village.max_storage
    village.iron_stock = village.iron_stock + int(village.iron_production)
    if village.iron_stock > village.max_storage:
        village.iron_stock = village.max_storage
session.commit()
Code update 02/11/2021:
session.query(VillageNew).where(VillageNew.wood_stock < VillageNew.max_storage).update({VillageNew.wood_stock: VillageNew.wood_stock + VillageNew.wood_production})
session.query(VillageNew).where(VillageNew.stone_stock < VillageNew.max_storage).update({VillageNew.stone_stock: VillageNew.stone_stock + VillageNew.stone_production})
session.query(VillageNew).where(VillageNew.iron_stock < VillageNew.max_storage).update({VillageNew.iron_stock: VillageNew.iron_stock + VillageNew.iron_production})
session.query(VillageNew).where(VillageNew.wood_stock > VillageNew.max_storage).update({VillageNew.wood_stock: VillageNew.max_storage})
session.query(VillageNew).where(VillageNew.stone_stock > VillageNew.max_storage).update({VillageNew.stone_stock: VillageNew.max_storage})
session.query(VillageNew).where(VillageNew.iron_stock > VillageNew.max_storage).update({VillageNew.iron_stock: VillageNew.max_storage})
In the end the fastest was pure SQL execution.
session.execute('UPDATE table.villagenew SET wood_stock = least(villagenew.wood_stock + villagenew.wood_production, villagenew.max_storage) WHERE villagenew.wood_stock < villagenew.max_storage')
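For reference, the same single-statement update can also be written with SQLAlchemy Core instead of a raw string (a sketch assuming SQLAlchemy 1.4+ and the VillageNew model and session from above; func.least compiles to the database's LEAST function):

from sqlalchemy import func, update

# cap the incremented stock at max_storage in a single UPDATE statement
stmt = (
    update(VillageNew)
    .where(VillageNew.wood_stock < VillageNew.max_storage)
    .values(wood_stock=func.least(VillageNew.wood_stock + VillageNew.wood_production,
                                  VillageNew.max_storage))
)
session.execute(stmt)
session.commit()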
In case feedback to the program was needed, for example to know whether storage is full:
session.query(VillageNew).where(VillageNew.wood_stock < VillageNew.max_storage).update({VillageNew.wood_stock: VillageNew.wood_stock + VillageNew.wood_production})
session.query(VillageNew).where(VillageNew.stone_stock < VillageNew.max_storage).update({VillageNew.stone_stock: VillageNew.stone_stock + VillageNew.stone_production})
session.query(VillageNew).where(VillageNew.iron_stock < VillageNew.max_storage).update({VillageNew.iron_stock: VillageNew.iron_stock + VillageNew.iron_production})
session.query(VillageNew).where(VillageNew.wood_stock > VillageNew.max_storage).update({VillageNew.wood_stock: VillageNew.max_storage})
session.query(VillageNew).where(VillageNew.stone_stock > VillageNew.max_storage).update({VillageNew.stone_stock: VillageNew.max_storage})
session.query(VillageNew).where(VillageNew.iron_stock > VillageNew.max_storage).update({VillageNew.iron_stock: VillageNew.max_storage})
Below is the code I am trying to run; the error I keep getting is shown below the code. I am having a hard time understanding where exactly the float value comes from: is it one of the variables in the code, or somewhere in the data? If someone understands the issue, please help me out.
import numpy as np
import tqdm
import gensim
import pandas as pd
# corpus, id2word and compute_coherence_values are defined earlier in the notebook

grid = {}
grid['Validation_Set'] = {}
# Topics range
min_topics = 10
max_topics = 20
step_size = 5
topics_range = range(min_topics, max_topics, step_size)
# Alpha parameter
alpha = list(np.arange(0.01, 1, 0.3))
alpha.append('symmetric')
alpha.append('asymmetric')
# Beta parameter
beta = list(np.arange(0.01, 1, 0.3))
beta.append('symmetric')
# Validation sets
num_of_doc = len(corpus)
num_of_docs = int(num_of_doc)
corpus_sets = [# gensim.utils.ClippedCorpus(corpus, num_of_docs*0.25),
               # gensim.utils.ClippedCorpus(corpus, num_of_docs*0.5),
               gensim.utils.ClippedCorpus(corpus, num_of_docs*0.75),
               corpus]
corpus_title = ['75% Corpus', '100% Corpus']
model_results = {'Validation_Set': [],
                 'Topics': [],
                 'Alpha': [],
                 'Beta': [],
                 'Coherence': []
                 }
if 1 == 1:
    pbar = tqdm.tqdm(total=540)
    # iterate through validation corpuses
    for i in range(len(corpus_sets)):
        # iterate through number of topics
        for k in topics_range:
            # iterate through alpha values
            for a in alpha:
                # iterate through beta values
                for b in beta:
                    # get the coherence score for the given parameters
                    cv = compute_coherence_values(corpus=corpus_sets[i], dictionary=id2word,
                                                  k=k, a=a, b=b)
                    # Save the model results
                    model_results['Validation_Set'].append(corpus_title[i])
                    model_results['Topics'].append(k)
                    model_results['Alpha'].append(a)
                    model_results['Beta'].append(b)
                    model_results['Coherence'].append(cv)
                    pbar.update(1)
    pd.DataFrame(model_results).to_csv('lda_tuning_results.csv', index=False)
    pbar.close()
The error that I keep getting is the following:
0%|          | 0/540 [00:00<?, ?it/s]
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/gensim/models/ldamulticore.py in update(self, corpus, chunks_as_numpy)
    212         try:
--> 213             lencorpus = len(corpus)
    214         except TypeError:

TypeError: 'float' object cannot be interpreted as an integer

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
5 frames
/usr/local/lib/python3.7/dist-packages/gensim/utils.py in __iter__(self)
    992
    993     def __iter__(self):
--> 994         return itertools.islice(self.corpus, self.max_docs)
    995
    996     def __len__(self):

ValueError: Stop argument for islice() must be None or an integer: 0 <= x <= sys.maxsize.
You need to cast to int when building your validation set in corpus_sets, so the clipped corpus size is no longer a float:
gensim.utils.ClippedCorpus(corpus, int(num_of_docs*0.75))
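Applied to the corpus_sets list from the question, that looks like:

corpus_sets = [
    # gensim.utils.ClippedCorpus(corpus, int(num_of_docs * 0.25)),
    # gensim.utils.ClippedCorpus(corpus, int(num_of_docs * 0.5)),
    gensim.utils.ClippedCorpus(corpus, int(num_of_docs * 0.75)),
    corpus,
]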
I have this raw SQL statement that I would like to use for both Core and ORM:
SELECT * FROM `buffet` WHERE `roomId` = '864495034004835' AND `recordMasa` > '1514600000' AND `recordMasa` < '1514900000' AND `recordMasa` mod 10 = 0 LIMIT 0,10000000000;
Please let me know how I can extend my existing code below to include the modulo condition:
select_statement = select([Buffet]).where(and_(
Buffet.recordMasa > arguments['startDate'],
Buffet.recordMasa < arguments['endDate'],
Buffet.roomId == arguments['ident']
))
rooms = conn.execute(select_statement).fetchall()
What about the modulo operator from Python? SQLAlchemy overloads % on column expressions, so it is rendered as the SQL modulo operation:
select_statement = select([Buffet]).where(and_(
Buffet.recordMasa > arguments['startDate'],
Buffet.recordMasa < arguments['endDate'],
Buffet.roomId == arguments['ident'],
Buffet.recordMasa % 10 == 0,
))
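And for the ORM side, a hedged sketch of the equivalent query, assuming a session bound to the same engine and the Buffet model from your code:

rooms = (
    session.query(Buffet)
    .filter(
        Buffet.recordMasa > arguments['startDate'],
        Buffet.recordMasa < arguments['endDate'],
        Buffet.roomId == arguments['ident'],
        Buffet.recordMasa % 10 == 0,
    )
    .all()
)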