diff --git a/dynamodb/batch.odin b/dynamodb/batch.odin
index 6fb7eee..b7072f0 100644
--- a/dynamodb/batch.odin
+++ b/dynamodb/batch.odin
@@ -65,6 +65,17 @@ batch_write_item :: proc(
         unprocessed = make([dynamic]Batch_Write_Table_Request),
     }
 
+    // Count total operations across all tables
+    total_ops := 0
+    for table_req in table_requests {
+        total_ops += len(table_req.requests)
+    }
+
+    // Enforce DynamoDB limit: 25 operations per batch
+    if total_ops > 25 {
+        return result, .Validation_Error
+    }
+
     for table_req in table_requests {
         failed_requests := make([dynamic]Write_Request)
 
@@ -78,13 +89,31 @@
                 var_err = delete_item(engine, table_req.table_name, req.item)
             }
 
+            // Distinguish validation errors from transient failures
             if var_err != .None {
-                // Deep copy the failed request for UnprocessedItems
-                failed_item := item_deep_copy(req.item)
-                append(&failed_requests, Write_Request{
-                    type = req.type,
-                    item = failed_item,
-                })
+                #partial switch var_err {
+                case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error:
+                    // Hard validation errors — fail the entire batch
+                    batch_write_result_destroy(&result)
+                    delete(failed_requests)
+                    return result, var_err
+
+                case .RocksDB_Error, .Item_Not_Found, .Table_Not_Found:
+                    // Transient/throttling errors — add to unprocessed
+                    failed_item := item_deep_copy(req.item)
+                    append(&failed_requests, Write_Request{
+                        type = req.type,
+                        item = failed_item,
+                    })
+
+                case .None, .Validation_Error, .Internal_Error:
+                    // Should not happen, but handle gracefully
+                    failed_item := item_deep_copy(req.item)
+                    append(&failed_requests, Write_Request{
+                        type = req.type,
+                        item = failed_item,
+                    })
+                }
             }
         }
 
@@ -101,6 +130,7 @@
     return result, .None
 }
 
+
 // ============================================================================
 // BatchGetItem Types
 // ============================================================================
@@ -157,6 +187,17 @@ batch_get_item :: proc(
         unprocessed_keys = make([dynamic]Batch_Get_Table_Request),
     }
 
+    // Count total keys across all tables
+    total_keys := 0
+    for table_req in table_requests {
+        total_keys += len(table_req.keys)
+    }
+
+    // Enforce DynamoDB limit: 100 keys per batch
+    if total_keys > 100 {
+        return result, .Validation_Error
+    }
+
     for table_req in table_requests {
         found_items := make([dynamic]Item)
         failed_keys := make([dynamic]Item)
@@ -164,10 +205,25 @@
         for key in table_req.keys {
             item_result, get_err := get_item(engine, table_req.table_name, key)
 
+            // Distinguish validation errors from transient failures
             if get_err != .None && get_err != .Item_Not_Found {
-                // Storage error — add to unprocessed
-                append(&failed_keys, item_deep_copy(key))
-                continue
+                #partial switch get_err {
+                case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error:
+                    // Hard validation error — fail the entire batch
+                    batch_get_result_destroy(&result)
+                    delete(found_items)
+                    delete(failed_keys)
+                    return result, get_err
+
+                case .RocksDB_Error, .Table_Not_Found:
+                    // Transient error — add to unprocessed
+                    append(&failed_keys, item_deep_copy(key))
+                    continue
+
+                case .None, .Validation_Error, .Internal_Error, .Item_Not_Found:
+                    // Should not happen here, but handle gracefully
+                    continue
+                }
             }
 
             if item_val, has_item := item_result.?; has_item {
diff --git a/dynamodb/storage.odin b/dynamodb/storage.odin
index 42cfbee..e599b2d 100644
--- a/dynamodb/storage.odin
+++ b/dynamodb/storage.odin
@@ -22,6 +22,9 @@ Storage_Error :: enum {
     Serialization_Error,
     RocksDB_Error,
     Out_Of_Memory,
+    Validation_Error,
+    Request_Too_Large,
+    Internal_Error,
 }
 
 // Result type for Scan operations with pagination