Make GSI maintenance atomic: stage index writes/deletes in a WriteBatch with the base item

This commit is contained in:
2026-02-16 09:13:33 -05:00
parent f8b0b1c3ae
commit 29fe8a60c3
3 changed files with 209 additions and 36 deletions

View File

@@ -20,6 +20,9 @@
// delete → for each GSI, extract GSI key attrs from the OLD item, delete GSI entry
// update → delete OLD GSI entries, write NEW GSI entries
//
// ATOMICITY: All GSI operations use WriteBatch to ensure that GSI entries are
// maintained atomically with the base item write/delete.
//
package dynamodb
import "core:slice"
@@ -156,14 +159,95 @@ gsi_project_item :: proc(
}
// ============================================================================
// GSI Write Maintenance
// GSI Write Maintenance - ATOMIC via WriteBatch
//
// Called after a successful data write to maintain GSI entries.
// Uses WriteBatch for atomicity (all GSI entries for one item in one batch).
// These procedures add GSI operations to a WriteBatch instead of performing
// direct database writes. This ensures atomicity with the base item operation.
// ============================================================================
// Add GSI write operations to a WriteBatch for an item across all GSIs.
// Called during put_item or update_item to maintain NEW GSI entries.
// Stage GSI write operations in a WriteBatch for one item across every GSI
// defined on the table. Called from put_item / update_item so the NEW index
// entries commit atomically with the base item write.
//
// Inputs:
//   batch      - the WriteBatch the base-item operation is also staged in
//   table_name - owning table, used to build index storage keys
//   item       - the full item being written
//   metadata   - table metadata holding the GSI definitions and key schema
// Returns .None on success, .Serialization_Error if a projected item fails
// to encode.
gsi_batch_write_entries :: proc(
	batch: ^rocksdb.WriteBatch,
	table_name: string,
	item: Item,
	metadata: ^Table_Metadata,
) -> Storage_Error {
	// Fast exit when the table defines no secondary indexes.
	indexes, defined := metadata.global_secondary_indexes.?
	if !defined || len(indexes) == 0 {
		return .None
	}

	for &index in indexes {
		// Sparse-index semantics: an item missing the GSI key attributes
		// simply has no entry in that index.
		key_values, has_key := gsi_extract_key_values(item, index.key_schema)
		if !has_key {
			continue
		}

		storage_key := build_gsi_key(table_name, index.index_name, key_values.pk, key_values.sk)
		defer delete(storage_key)

		// Apply the index projection before encoding the stored copy.
		projection := gsi_project_item(item, &index, metadata.key_schema)
		defer item_destroy(&projection)

		payload, encoded := encode(projection)
		if !encoded {
			// NOTE(review): entries staged in earlier iterations remain in
			// `batch`; presumably the caller discards the whole batch on
			// error — confirm at the call sites.
			return .Serialization_Error
		}
		defer delete(payload)

		// Staged only — nothing reaches the database until the batch commits.
		rocksdb.batch_put(batch, storage_key, payload)
	}
	return .None
}
// Add GSI delete operations to a WriteBatch for an item across all GSIs.
// Called during delete_item or update_item to remove OLD GSI entries.
// Needs the OLD item to know which GSI keys to remove.
// Stage GSI delete operations in a WriteBatch for one item across every GSI
// defined on the table. Called from delete_item / update_item so the OLD
// index entries are removed atomically with the base item operation.
//
// Requires the OLD item image: its attribute values determine which index
// keys must be removed.
//
// Inputs:
//   batch      - the WriteBatch the base-item operation is also staged in
//   table_name - owning table, used to build index storage keys
//   old_item   - the previous item image being removed/replaced
//   metadata   - table metadata holding the GSI definitions
// Returns .None (no failure paths inside the loop).
gsi_batch_delete_entries :: proc(
	batch: ^rocksdb.WriteBatch,
	table_name: string,
	old_item: Item,
	metadata: ^Table_Metadata,
) -> Storage_Error {
	// Fast exit when the table defines no secondary indexes.
	indexes, defined := metadata.global_secondary_indexes.?
	if !defined || len(indexes) == 0 {
		return .None
	}

	for &index in indexes {
		key_values, present := gsi_extract_key_values(old_item, index.key_schema)
		if !present {
			// Sparse index: the old item never had an entry here.
			continue
		}

		storage_key := build_gsi_key(table_name, index.index_name, key_values.pk, key_values.sk)
		defer delete(storage_key)

		// Staged only — applied when the caller commits the batch.
		rocksdb.batch_delete(batch, storage_key)
	}
	return .None
}
// ============================================================================
// DEPRECATED - Non-atomic GSI maintenance
//
// These procedures are kept for backwards compatibility but should NOT be used.
// They perform individual database writes which is NOT atomic.
// Use gsi_batch_write_entries and gsi_batch_delete_entries instead.
// ============================================================================
// DEPRECATED: Use gsi_batch_write_entries instead for atomic operations.
// Write GSI entries for an item across all GSIs defined on the table.
// Should be called AFTER the main data key is written.
// WARNING: This performs individual writes which is NOT atomic!
gsi_write_entries :: proc(
engine: ^Storage_Engine,
table_name: string,
@@ -207,9 +291,9 @@ gsi_write_entries :: proc(
return .None
}
// DEPRECATED: Use gsi_batch_delete_entries instead for atomic operations.
// Delete GSI entries for an item across all GSIs.
// Should be called BEFORE or AFTER the main data key is deleted.
// Needs the OLD item to know which GSI keys to remove.
// WARNING: This performs individual writes which is NOT atomic!
gsi_delete_entries :: proc(
engine: ^Storage_Engine,
table_name: string,