flux-account-priority-update: use column names
Problem: The flux-account-priority-update script uses row indexes to
build the data that is sent to the priority plugin. This is fragile:
it relies on internal knowledge of the DB schema and makes the code
hard to read.

Set the connection's row_factory attribute to sqlite3.Row so that row
values can be accessed by column name instead of by index.
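
For reference, the snippet below is a minimal, self-contained sketch of the
sqlite3.Row pattern this commit adopts; the table and column names here are
hypothetical and do not reflect the actual flux-accounting schema.

```python
import sqlite3

# Minimal sketch of the sqlite3.Row pattern (hypothetical table/columns).
conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row  # set before creating the cursor
cur = conn.cursor()

cur.execute("CREATE TABLE user_table (userid INTEGER, default_bank TEXT)")
cur.execute("INSERT INTO user_table VALUES (?, ?)", (1001, "bankA"))

for row in cur.execute("SELECT * FROM user_table"):
    # row["userid"] is equivalent to row[0], but it no longer depends
    # on the column order defined in the schema
    print(int(row["userid"]), str(row["default_bank"]))

conn.close()
```

Note that conn.row_factory is only assigned to cursors at creation time, so
it must be set before conn.cursor() is called, which is where the change
below places the assignment.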
cmoussa1 committed Feb 6, 2025
1 parent f951766 commit d6dc133
Showing 1 changed file with 19 additions and 18 deletions.
src/cmd/flux-account-priority-update.py
@@ -67,6 +67,7 @@ def est_sqlite_conn(path):
 
 def bulk_update(path):
     conn = est_sqlite_conn(path)
+    conn.row_factory = sqlite3.Row
     cur = conn.cursor()
 
     data = {}
@@ -83,18 +84,18 @@ def bulk_update(path):
     ):
         # create a JSON payload with the results of the query
         single_user_data = {
-            "userid": int(row[0]),
-            "bank": str(row[1]),
-            "def_bank": str(row[2]),
-            "fairshare": float(row[3]),
-            "max_running_jobs": int(row[4]),
-            "max_active_jobs": int(row[5]),
-            "queues": str(row[6]),
-            "active": int(row[7]),
-            "projects": str(row[8]),
-            "def_project": str(row[9]),
-            "max_nodes": int(row[10]),
-            "max_cores": int(row[11]),
+            "userid": int(row["userid"]),
+            "bank": str(row["bank"]),
+            "def_bank": str(row["default_bank"]),
+            "fairshare": float(row["fairshare"]),
+            "max_running_jobs": int(row["max_running_jobs"]),
+            "max_active_jobs": int(row["max_active_jobs"]),
+            "queues": str(row["queues"]),
+            "active": int(row["active"]),
+            "projects": str(row["projects"]),
+            "def_project": str(row["default_project"]),
+            "max_nodes": int(row["max_nodes"]),
+            "max_cores": int(row["max_cores"]),
         }
         bulk_user_data.append(single_user_data)
 
@@ -106,11 +107,11 @@ def bulk_update(path):
     for row in cur.execute("SELECT * FROM queue_table"):
         # create a JSON payload with the results of the query
         single_q_data = {
-            "queue": str(row[0]),
-            "min_nodes_per_job": int(row[1]),
-            "max_nodes_per_job": int(row[2]),
-            "max_time_per_job": int(row[3]),
-            "priority": int(row[4]),
+            "queue": str(row["queue"]),
+            "min_nodes_per_job": int(row["min_nodes_per_job"]),
+            "max_nodes_per_job": int(row["max_nodes_per_job"]),
+            "max_time_per_job": int(row["max_time_per_job"]),
+            "priority": int(row["priority"]),
         }
         bulk_q_data.append(single_q_data)
 
@@ -122,7 +123,7 @@ def bulk_update(path):
     for row in cur.execute("SELECT project FROM project_table"):
         # create a JSON payload with the results of the query
         single_project = {
-            "project": str(row[0]),
+            "project": str(row["project"]),
         }
         bulk_proj_data.append(single_project)
 
