// BatchWriteItem and BatchGetItem storage operations
//
// BatchWriteItem: Puts or deletes multiple items across one or more tables.
// - Up to 25 items per batch (DynamoDB limit)
// - Each item is an independent PutRequest or DeleteRequest
// - Partial failures are reported via UnprocessedItems
//
// BatchGetItem: Retrieves multiple items from one or more tables.
// - Up to 100 items per batch (DynamoDB limit)
// - Each table request contains a list of Keys
// - Partial failures reported via UnprocessedKeys
package dynamodb
// ============================================================================
// BatchWriteItem Types
// ============================================================================
// Kind of operation carried by a single Write_Request in a BatchWriteItem call.
Write_Request_Type :: enum {
	Put,    // Insert or fully replace an item.
	Delete, // Remove the item identified by its key attributes.
}
// One independent put-or-delete operation inside a BatchWriteItem batch.
Write_Request :: struct {
	type: Write_Request_Type,
	item: Item, // For Put: the full item. For Delete: the key item.
}
// All write operations targeting a single table within one BatchWriteItem call.
Batch_Write_Table_Request :: struct {
	table_name: string,          // Table every request below is applied to.
	requests:   []Write_Request, // Independent put/delete operations.
}
// Result of a BatchWriteItem call. Owns its contents; release with
// batch_write_result_destroy.
Batch_Write_Result :: struct {
	// UnprocessedItems — requests that failed and should be retried.
	// For now we process everything or return an error, so this is
	// typically empty. Populated only on partial failures.
	unprocessed: [dynamic]Batch_Write_Table_Request,
}
// Releases every allocation owned by a Batch_Write_Result: each deep-copied
// item inside each unprocessed table request, each request slice, and the
// outer dynamic array itself.
batch_write_result_destroy :: proc(result: ^Batch_Write_Result) {
	for i in 0 ..< len(result.unprocessed) {
		table_req := &result.unprocessed[i]
		for j in 0 ..< len(table_req.requests) {
			item_destroy(&table_req.requests[j].item)
		}
		delete(table_req.requests)
	}
	delete(result.unprocessed)
}
// ============================================================================
// BatchWriteItem — Execute a batch of put/delete operations
//
// DynamoDB semantics:
// - Operations within a batch are NOT atomic (some may succeed, some fail)
// - Each operation is validated independently
// - Failed operations go into UnprocessedItems
// - Limit: 25 operations total across all tables
//
// On a hard failure (validation error or missing table) the whole request
// fails: all partial state is freed and a zero-value result is returned with
// the error. On success, transiently failed requests are deep-copied into
// result.unprocessed; the caller owns the result and must call
// batch_write_result_destroy on it.
// ============================================================================
batch_write_item :: proc(
	engine: ^Storage_Engine,
	table_requests: []Batch_Write_Table_Request,
) -> (Batch_Write_Result, Storage_Error) {
	result := Batch_Write_Result{
		unprocessed = make([dynamic]Batch_Write_Table_Request),
	}

	// Enforce the DynamoDB-wide limit (25 operations across all tables)
	// before attempting any write, so a too-large batch has no side effects.
	total_ops := 0
	for table_req in table_requests {
		total_ops += len(table_req.requests)
	}
	if total_ops > 25 {
		return result, .Validation_Error
	}

	for table_req in table_requests {
		failed_requests := make([dynamic]Write_Request)
		for req in table_req.requests {
			op_err: Storage_Error
			switch req.type {
			case .Put:
				op_err = put_item(engine, table_req.table_name, req.item)
			case .Delete:
				op_err = delete_item(engine, table_req.table_name, req.item)
			}
			if op_err == .None {
				continue
			}

			#partial switch op_err {
			case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error, .Table_Not_Found:
				// Hard request failures — fail the entire batch.
				// .Table_Not_Found mirrors DynamoDB's ResourceNotFoundException,
				// which aborts the whole request rather than being retryable.
				batch_write_result_destroy(&result)
				// BUG FIX: the deep-copied items already appended to
				// failed_requests were previously leaked; destroy them
				// before freeing the array.
				for &failed in failed_requests {
					item_destroy(&failed.item)
				}
				delete(failed_requests)
				// BUG FIX: return a zero value instead of the destroyed
				// result, so a caller that destroys the returned value
				// cannot double-free.
				return Batch_Write_Result{}, op_err
			case:
				// .RocksDB_Error / .Item_Not_Found are genuinely transient
				// infrastructure errors; anything unexpected is handled the
				// same way — deep-copy the request into UnprocessedItems.
				append(&failed_requests, Write_Request{
					type = req.type,
					item = item_deep_copy(req.item),
				})
			}
		}
		if len(failed_requests) > 0 {
			append(&result.unprocessed, Batch_Write_Table_Request{
				table_name = table_req.table_name,
				requests   = failed_requests[:],
			})
		} else {
			delete(failed_requests)
		}
	}
	return result, .None
}
// ============================================================================
// BatchGetItem Types
// ============================================================================
// All key lookups targeting a single table within one BatchGetItem call.
Batch_Get_Table_Request :: struct {
	table_name: string, // Table every key below is looked up in.
	keys:       []Item, // Each Item holds only the key attributes.
}
// Items retrieved from a single table by a BatchGetItem call.
Batch_Get_Table_Result :: struct {
	table_name: string,
	items:      []Item, // Found items only; missing keys are omitted.
}
// Result of a BatchGetItem call. Owns its contents; release with
// batch_get_result_destroy.
Batch_Get_Result :: struct {
	responses:        [dynamic]Batch_Get_Table_Result,  // Per-table found items.
	unprocessed_keys: [dynamic]Batch_Get_Table_Request, // Keys to retry.
}
// Releases every allocation owned by a Batch_Get_Result: all retrieved items
// and their per-table slices, all deep-copied unprocessed keys and their
// slices, and both outer dynamic arrays.
batch_get_result_destroy :: proc(result: ^Batch_Get_Result) {
	for i in 0 ..< len(result.responses) {
		response := &result.responses[i]
		for j in 0 ..< len(response.items) {
			item_destroy(&response.items[j])
		}
		delete(response.items)
	}
	delete(result.responses)

	for i in 0 ..< len(result.unprocessed_keys) {
		pending := &result.unprocessed_keys[i]
		for j in 0 ..< len(pending.keys) {
			item_destroy(&pending.keys[j])
		}
		delete(pending.keys)
	}
	delete(result.unprocessed_keys)
}
// ============================================================================
// BatchGetItem — Retrieve multiple items from one or more tables
//
// DynamoDB semantics:
// - Each key is fetched independently
// - Missing items are silently omitted (no error)
// - Failed lookups go into UnprocessedKeys
// - Limit: 100 keys total across all tables
//
// On a hard failure (validation error or missing table) the whole request
// fails: all partial state is freed and a zero-value result is returned with
// the error. On success the caller owns the result and must call
// batch_get_result_destroy on it.
// ============================================================================
batch_get_item :: proc(
	engine: ^Storage_Engine,
	table_requests: []Batch_Get_Table_Request,
) -> (Batch_Get_Result, Storage_Error) {
	result := Batch_Get_Result{
		responses        = make([dynamic]Batch_Get_Table_Result),
		unprocessed_keys = make([dynamic]Batch_Get_Table_Request),
	}

	// Enforce the DynamoDB-wide limit (100 keys across all tables) before
	// performing any read, so a too-large batch has no side effects.
	total_keys := 0
	for table_req in table_requests {
		total_keys += len(table_req.keys)
	}
	if total_keys > 100 {
		return result, .Validation_Error
	}

	for table_req in table_requests {
		found_items := make([dynamic]Item)
		failed_keys := make([dynamic]Item)
		for key in table_req.keys {
			item_result, get_err := get_item(engine, table_req.table_name, key)
			if get_err != .None && get_err != .Item_Not_Found {
				#partial switch get_err {
				case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error, .Table_Not_Found:
					// Hard request failures — fail the entire batch.
					// CONSISTENCY FIX: .Table_Not_Found was previously treated
					// as retryable here but as a hard failure in
					// batch_write_item; DynamoDB returns
					// ResourceNotFoundException for the whole request in both
					// operations, so fail hard in both.
					batch_get_result_destroy(&result)
					// BUG FIX: the items and deep-copied keys accumulated for
					// the current table were previously leaked; destroy them
					// before freeing the arrays.
					for &it in found_items {
						item_destroy(&it)
					}
					delete(found_items)
					for &k in failed_keys {
						item_destroy(&k)
					}
					delete(failed_keys)
					// BUG FIX: return a zero value instead of the destroyed
					// result, so a caller that destroys the returned value
					// cannot double-free.
					return Batch_Get_Result{}, get_err
				case:
					// .RocksDB_Error is genuinely transient; anything
					// unexpected is handled the same way — deep-copy the key
					// into UnprocessedKeys for retry.
					append(&failed_keys, item_deep_copy(key))
				}
				continue
			}
			// .Item_Not_Found falls through here with an empty item_result:
			// missing items are silently omitted (DynamoDB behavior).
			if item_val, has_item := item_result.?; has_item {
				append(&found_items, item_val)
			}
		}
		if len(found_items) > 0 {
			append(&result.responses, Batch_Get_Table_Result{
				table_name = table_req.table_name,
				items      = found_items[:],
			})
		} else {
			delete(found_items)
		}
		if len(failed_keys) > 0 {
			append(&result.unprocessed_keys, Batch_Get_Table_Request{
				table_name = table_req.table_name,
				keys       = failed_keys[:],
			})
		} else {
			delete(failed_keys)
		}
	}
	return result, .None
}