Make GSI maintenance atomic: batch base-item and GSI index updates into a single RocksDB WriteBatch

This commit is contained in:
2026-02-16 09:13:33 -05:00
parent f8b0b1c3ae
commit 29fe8a60c3
3 changed files with 209 additions and 36 deletions

View File

@@ -729,6 +729,7 @@ delete_table :: proc(engine: ^Storage_Engine, table_name: string) -> Storage_Err
// ============================================================================
// Put item — uses EXCLUSIVE lock (write operation)
// ATOMICITY: Uses WriteBatch to ensure base item + all GSI updates are atomic
put_item :: proc(engine: ^Storage_Engine, table_name: string, item: Item) -> Storage_Error {
table_lock := get_or_create_table_lock(engine, table_name)
sync.rw_mutex_lock(table_lock)
@@ -771,34 +772,59 @@ put_item :: proc(engine: ^Storage_Engine, table_name: string, item: Item) -> Sto
storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
defer delete(storage_key)
// --- GSI cleanup: delete OLD GSI entries if item already exists ---
// --- Check if item already exists (need old item for GSI cleanup) ---
old_item: Maybe(Item) = nil
existing_value, existing_err := rocksdb.db_get(&engine.db, storage_key)
if existing_err == .None && existing_value != nil {
defer delete(existing_value)
old_item, decode_ok := decode(existing_value)
decoded_old, decode_ok := decode(existing_value)
if decode_ok {
defer item_destroy(&old_item)
gsi_delete_entries(engine, table_name, old_item, &metadata)
old_item = decoded_old
}
}
// Cleanup old_item at the end
defer {
if old, has_old := old_item.?; has_old {
old_copy := old
item_destroy(&old_copy)
}
}
// Encode item
// Encode new item
encoded_item, encode_ok := encode(item)
if !encode_ok {
return .Serialization_Error
}
defer delete(encoded_item)
// Store in RocksDB
put_err := rocksdb.db_put(&engine.db, storage_key, encoded_item)
if put_err != .None {
// --- ATOMIC WRITE BATCH: base item + all GSI updates ---
batch, batch_err := rocksdb.batch_create()
if batch_err != .None {
return .RocksDB_Error
}
defer rocksdb.batch_destroy(&batch)
// --- GSI maintenance: write NEW GSI entries ---
gsi_err := gsi_write_entries(engine, table_name, item, &metadata)
if gsi_err != .None {
return gsi_err
// Add base item write to batch
rocksdb.batch_put(&batch, storage_key, encoded_item)
// Add old GSI entry deletions to batch (if item existed)
if old, has_old := old_item.?; has_old {
gsi_del_err := gsi_batch_delete_entries(&batch, table_name, old, &metadata)
if gsi_del_err != .None {
return gsi_del_err
}
}
// Add new GSI entry writes to batch
gsi_write_err := gsi_batch_write_entries(&batch, table_name, item, &metadata)
if gsi_write_err != .None {
return gsi_write_err
}
// Write batch atomically - ALL or NOTHING
write_err := rocksdb.batch_write(&engine.db, &batch)
if write_err != .None {
return .RocksDB_Error
}
return .None
@@ -861,6 +887,7 @@ get_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> (May
}
// Delete item — uses EXCLUSIVE lock (write operation)
// ATOMICITY: Uses WriteBatch to ensure base item + all GSI deletions are atomic
delete_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> Storage_Error {
table_lock := get_or_create_table_lock(engine, table_name)
sync.rw_mutex_lock(table_lock)
@@ -897,20 +924,50 @@ delete_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> S
storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
defer delete(storage_key)
// --- GSI cleanup: read existing item to know which GSI entries to remove ---
// --- Read existing item to know which GSI entries to remove ---
old_item: Maybe(Item) = nil
existing_value, existing_err := rocksdb.db_get(&engine.db, storage_key)
if existing_err == .None && existing_value != nil {
defer delete(existing_value)
old_item, decode_ok := decode(existing_value)
decoded_old, decode_ok := decode(existing_value)
if decode_ok {
defer item_destroy(&old_item)
gsi_delete_entries(engine, table_name, old_item, &metadata)
old_item = decoded_old
}
}
// Cleanup old_item at the end
defer {
if old, has_old := old_item.?; has_old {
old_copy := old
item_destroy(&old_copy)
}
}
// Delete from RocksDB
del_err := rocksdb.db_delete(&engine.db, storage_key)
if del_err != .None {
// If item doesn't exist, nothing to delete (not an error in DynamoDB)
if _, has_old := old_item.?; !has_old {
return .None
}
// --- ATOMIC WRITE BATCH: base item deletion + all GSI deletions ---
batch, batch_err := rocksdb.batch_create()
if batch_err != .None {
return .RocksDB_Error
}
defer rocksdb.batch_destroy(&batch)
// Add base item delete to batch
rocksdb.batch_delete(&batch, storage_key)
// Add GSI entry deletions to batch
if old, has_old := old_item.?; has_old {
gsi_del_err := gsi_batch_delete_entries(&batch, table_name, old, &metadata)
if gsi_del_err != .None {
return gsi_del_err
}
}
// Write batch atomically - ALL or NOTHING
write_err := rocksdb.batch_write(&engine.db, &batch)
if write_err != .None {
return .RocksDB_Error
}