its kind of working now
This commit is contained in:
439
src/dynamodb/handler.zig
Normal file
439
src/dynamodb/handler.zig
Normal file
@@ -0,0 +1,439 @@
|
||||
/// DynamoDB API request handlers
|
||||
const std = @import("std");
|
||||
const http = @import("../http.zig");
|
||||
const storage = @import("storage.zig");
|
||||
const types = @import("types.zig");
|
||||
|
||||
pub const ApiHandler = struct {
|
||||
engine: *storage.StorageEngine,
|
||||
allocator: std.mem.Allocator,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
/// Construct a handler. The handler borrows `engine`; the caller retains
/// ownership of both the engine and the allocator's backing state.
pub fn init(allocator: std.mem.Allocator, engine: *storage.StorageEngine) Self {
    return Self{
        .allocator = allocator,
        .engine = engine,
    };
}
|
||||
|
||||
/// Dispatch a DynamoDB JSON-1.0 request based on the X-Amz-Target header.
/// Handlers mutate `response` in place; error paths return a by-value copy
/// via errorResponse.
/// NOTE(review): errorResponse returns `response.*` — confirm http.Response
/// is safe to copy by value (no self-referential buffers).
pub fn handle(self: *Self, request: *const http.Request) http.Response {
    var response = http.Response.init(self.allocator);

    // Add standard DynamoDB headers
    response.addHeader("Content-Type", "application/x-amz-json-1.0") catch {};
    response.addHeader("x-amzn-RequestId", "local-request-id") catch {};

    // Get operation from X-Amz-Target header
    const target = request.getHeader("X-Amz-Target") orelse {
        return self.errorResponse(&response, .ValidationException, "Missing X-Amz-Target header");
    };

    const operation = types.Operation.fromTarget(target);

    switch (operation) {
        .CreateTable => self.handleCreateTable(request, &response),
        .DeleteTable => self.handleDeleteTable(request, &response),
        .DescribeTable => self.handleDescribeTable(request, &response),
        .ListTables => self.handleListTables(request, &response),
        .PutItem => self.handlePutItem(request, &response),
        .GetItem => self.handleGetItem(request, &response),
        .DeleteItem => self.handleDeleteItem(request, &response),
        .Query => self.handleQuery(request, &response),
        .Scan => self.handleScan(request, &response),
        .Unknown => {
            return self.errorResponse(&response, .ValidationException, "Unknown operation");
        },
        // Recognized operations that have no handler yet.
        else => {
            return self.errorResponse(&response, .ValidationException, "Operation not implemented");
        },
    }

    return response;
}
|
||||
|
||||
/// CreateTable handler.
/// NOTE(review): the request's KeySchema/AttributeDefinitions are ignored —
/// every table is created with a single string HASH key named "pk". Confirm
/// this simplification is intended before exposing to real clients.
fn handleCreateTable(self: *Self, request: *const http.Request, response: *http.Response) void {
    // Parse table name from request body
    const table_name = extractJsonString(request.body, "TableName") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing TableName");
        return;
    };

    // Simplified: create with default key schema
    const key_schema = [_]types.KeySchemaElement{
        .{ .attribute_name = "pk", .key_type = .HASH },
    };
    const attr_defs = [_]types.AttributeDefinition{
        .{ .attribute_name = "pk", .attribute_type = .S },
    };

    const desc = self.engine.createTable(table_name, &key_schema, &attr_defs) catch |err| {
        switch (err) {
            storage.StorageError.TableAlreadyExists => {
                _ = self.errorResponse(response, .ResourceInUseException, "Table already exists");
            },
            else => {
                _ = self.errorResponse(response, .InternalServerError, "Failed to create table");
            },
        }
        return;
    };

    // Build response (DynamoDB "TableDescription" shape, truncated to the
    // fields this server tracks).
    const resp_body = std.fmt.allocPrint(
        self.allocator,
        "{{\"TableDescription\":{{\"TableName\":\"{s}\",\"TableStatus\":\"{s}\",\"CreationDateTime\":{d}}}}}",
        .{ desc.table_name, desc.table_status.toString(), desc.creation_date_time },
    ) catch {
        _ = self.errorResponse(response, .InternalServerError, "Serialization failed");
        return;
    };
    defer self.allocator.free(resp_body);

    // NOTE(review): assumes setBody copies the slice; resp_body is freed at
    // scope exit — confirm against http.Response.
    response.setBody(resp_body) catch {};
}
|
||||
|
||||
/// DeleteTable handler: removes the table and echoes a DELETING
/// TableDescription, mirroring DynamoDB's response shape.
fn handleDeleteTable(self: *Self, request: *const http.Request, response: *http.Response) void {
    const table_name = extractJsonString(request.body, "TableName") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing TableName");
        return;
    };

    self.engine.deleteTable(table_name) catch |err| {
        if (err == storage.StorageError.TableNotFound) {
            _ = self.errorResponse(response, .ResourceNotFoundException, "Table not found");
        } else {
            _ = self.errorResponse(response, .InternalServerError, "Failed to delete table");
        }
        return;
    };

    const body = std.fmt.allocPrint(
        self.allocator,
        "{{\"TableDescription\":{{\"TableName\":\"{s}\",\"TableStatus\":\"DELETING\"}}}}",
        .{table_name},
    ) catch return;
    defer self.allocator.free(body);

    response.setBody(body) catch {};
}
|
||||
|
||||
/// DescribeTable handler: reports name, status, item count and size.
fn handleDescribeTable(self: *Self, request: *const http.Request, response: *http.Response) void {
    const table_name = extractJsonString(request.body, "TableName") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing TableName");
        return;
    };

    const desc = self.engine.describeTable(table_name) catch |err| {
        if (err == storage.StorageError.TableNotFound) {
            _ = self.errorResponse(response, .ResourceNotFoundException, "Table not found");
        } else {
            _ = self.errorResponse(response, .InternalServerError, "Failed to describe table");
        }
        return;
    };

    const body = std.fmt.allocPrint(
        self.allocator,
        "{{\"Table\":{{\"TableName\":\"{s}\",\"TableStatus\":\"{s}\",\"ItemCount\":{d},\"TableSizeBytes\":{d}}}}}",
        .{ desc.table_name, desc.table_status.toString(), desc.item_count, desc.table_size_bytes },
    ) catch return;
    defer self.allocator.free(body);

    response.setBody(body) catch {};
}
|
||||
|
||||
/// ListTables handler. The request body is ignored (no pagination support).
fn handleListTables(self: *Self, request: *const http.Request, response: *http.Response) void {
    _ = request;

    const tables = self.engine.listTables() catch {
        _ = self.errorResponse(response, .InternalServerError, "Failed to list tables");
        return;
    };
    defer {
        for (tables) |name| self.allocator.free(name);
        self.allocator.free(tables);
    }

    var json = std.ArrayList(u8){};
    defer json.deinit(self.allocator);
    const out = json.writer(self.allocator);

    out.writeAll("{\"TableNames\":[") catch return;
    var first = true;
    for (tables) |name| {
        if (!first) out.writeByte(',') catch return;
        first = false;
        out.print("\"{s}\"", .{name}) catch return;
    }
    out.writeAll("]}") catch return;

    response.setBody(json.items) catch {};
}
|
||||
|
||||
/// PutItem handler: stores the raw Item JSON object verbatim.
/// NOTE(review): brace matching below does not account for '{'/'}' inside
/// quoted strings — confirm item payloads never contain them.
fn handlePutItem(self: *Self, request: *const http.Request, response: *http.Response) void {
    const table_name = extractJsonString(request.body, "TableName") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing TableName");
        return;
    };

    // Locate the Item attribute, then brace-match its JSON object.
    const item_start = std.mem.indexOf(u8, request.body, "\"Item\":") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing Item");
        return;
    };

    var depth: i32 = 0;
    var obj_start: usize = 0;
    var obj_end: usize = 0;
    var pos = item_start;
    while (pos < request.body.len) : (pos += 1) {
        const c = request.body[pos];
        if (c == '{') {
            if (depth == 0) obj_start = pos;
            depth += 1;
        } else if (c == '}') {
            depth -= 1;
            if (depth == 0) {
                obj_end = pos + 1;
                break;
            }
        }
    }

    if (obj_start == 0 or obj_end == 0) {
        _ = self.errorResponse(response, .ValidationException, "Invalid Item format");
        return;
    }

    const item_json = request.body[obj_start..obj_end];

    self.engine.putItem(table_name, item_json) catch |err| {
        if (err == storage.StorageError.TableNotFound) {
            _ = self.errorResponse(response, .ResourceNotFoundException, "Table not found");
        } else {
            _ = self.errorResponse(response, .InternalServerError, "Failed to put item");
        }
        return;
    };

    response.setBody("{}") catch {};
}
|
||||
|
||||
/// GetItem handler: returns {"Item":...} when found, {} when absent.
/// NOTE(review): brace matching ignores braces inside quoted strings.
fn handleGetItem(self: *Self, request: *const http.Request, response: *http.Response) void {
    const table_name = extractJsonString(request.body, "TableName") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing TableName");
        return;
    };

    // Extract Key from request
    const key_start = std.mem.indexOf(u8, request.body, "\"Key\":") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing Key");
        return;
    };

    var brace_count: i32 = 0;
    var key_json_start: usize = 0;
    var key_json_end: usize = 0;

    for (request.body[key_start..], 0..) |char, offset| {
        if (char == '{') {
            if (brace_count == 0) key_json_start = key_start + offset;
            brace_count += 1;
        } else if (char == '}') {
            brace_count -= 1;
            if (brace_count == 0) {
                key_json_end = key_start + offset + 1;
                break;
            }
        }
    }

    // BUGFIX: this validation was missing (handlePutItem has it). A body with
    // "Key": but no well-formed object left key_json_end == 0, so the slice
    // below either panicked (start > end) or silently passed an empty key.
    if (key_json_start == 0 or key_json_end == 0) {
        _ = self.errorResponse(response, .ValidationException, "Invalid Key format");
        return;
    }

    const key_json = request.body[key_json_start..key_json_end];

    const item = self.engine.getItem(table_name, key_json) catch |err| {
        switch (err) {
            storage.StorageError.TableNotFound => {
                _ = self.errorResponse(response, .ResourceNotFoundException, "Table not found");
            },
            else => {
                _ = self.errorResponse(response, .InternalServerError, "Failed to get item");
            },
        }
        return;
    };

    if (item) |found| {
        defer self.allocator.free(found);
        const resp = std.fmt.allocPrint(self.allocator, "{{\"Item\":{s}}}", .{found}) catch return;
        defer self.allocator.free(resp);
        response.setBody(resp) catch {};
    } else {
        response.setBody("{}") catch {};
    }
}
|
||||
|
||||
/// DeleteItem handler: deletes by key and returns an empty JSON object.
/// NOTE(review): brace matching ignores braces inside quoted strings.
fn handleDeleteItem(self: *Self, request: *const http.Request, response: *http.Response) void {
    const table_name = extractJsonString(request.body, "TableName") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing TableName");
        return;
    };

    const key_start = std.mem.indexOf(u8, request.body, "\"Key\":") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing Key");
        return;
    };

    var brace_count: i32 = 0;
    var key_json_start: usize = 0;
    var key_json_end: usize = 0;

    for (request.body[key_start..], 0..) |char, offset| {
        if (char == '{') {
            if (brace_count == 0) key_json_start = key_start + offset;
            brace_count += 1;
        } else if (char == '}') {
            brace_count -= 1;
            if (brace_count == 0) {
                key_json_end = key_start + offset + 1;
                break;
            }
        }
    }

    // BUGFIX: this validation was missing (handlePutItem has it). Without it
    // a malformed Key left key_json_end == 0 and the slice below panicked or
    // passed an empty key to the storage engine.
    if (key_json_start == 0 or key_json_end == 0) {
        _ = self.errorResponse(response, .ValidationException, "Invalid Key format");
        return;
    }

    const key_json = request.body[key_json_start..key_json_end];

    self.engine.deleteItem(table_name, key_json) catch |err| {
        switch (err) {
            storage.StorageError.TableNotFound => {
                _ = self.errorResponse(response, .ResourceNotFoundException, "Table not found");
            },
            else => {
                _ = self.errorResponse(response, .InternalServerError, "Failed to delete item");
            },
        }
        return;
    };

    response.setBody("{}") catch {};
}
|
||||
|
||||
/// Query handler. Only the literal ":pk" expression attribute value is
/// honored; full KeyConditionExpression parsing is not implemented.
fn handleQuery(self: *Self, request: *const http.Request, response: *http.Response) void {
    const table_name = extractJsonString(request.body, "TableName") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing TableName");
        return;
    };

    // Simplified: extract partition key from KeyConditionExpression.
    // In production, would need full expression parsing.
    const pk_value = extractJsonString(request.body, ":pk") orelse "default";

    const items = self.engine.query(table_name, pk_value, null) catch |err| {
        if (err == storage.StorageError.TableNotFound) {
            _ = self.errorResponse(response, .ResourceNotFoundException, "Table not found");
        } else {
            _ = self.errorResponse(response, .InternalServerError, "Query failed");
        }
        return;
    };
    defer {
        for (items) |item| self.allocator.free(item);
        self.allocator.free(items);
    }

    self.writeItemsResponse(response, items);
}
|
||||
|
||||
/// Scan handler: returns every item in the table (no limit, no filters).
fn handleScan(self: *Self, request: *const http.Request, response: *http.Response) void {
    const table_name = extractJsonString(request.body, "TableName") orelse {
        _ = self.errorResponse(response, .ValidationException, "Missing TableName");
        return;
    };

    const items = self.engine.scan(table_name, null) catch |err| {
        if (err == storage.StorageError.TableNotFound) {
            _ = self.errorResponse(response, .ResourceNotFoundException, "Table not found");
        } else {
            _ = self.errorResponse(response, .InternalServerError, "Scan failed");
        }
        return;
    };
    defer {
        for (items) |item| self.allocator.free(item);
        self.allocator.free(items);
    }

    self.writeItemsResponse(response, items);
}
|
||||
|
||||
/// Serialize items as {"Items":[...],"Count":n,"ScannedCount":n} into the
/// response body. Each item is assumed to already be valid JSON.
fn writeItemsResponse(self: *Self, response: *http.Response, items: []const []const u8) void {
    var json = std.ArrayList(u8){};
    defer json.deinit(self.allocator);
    const out = json.writer(self.allocator);

    out.writeAll("{\"Items\":[") catch return;
    var first = true;
    for (items) |item| {
        if (!first) out.writeByte(',') catch return;
        first = false;
        out.writeAll(item) catch return;
    }
    out.print("],\"Count\":{d},\"ScannedCount\":{d}}}", .{ items.len, items.len }) catch return;

    response.setBody(json.items) catch {};
}
|
||||
|
||||
/// Fill `response` with a DynamoDB-style error payload and return a copy of
/// it. handle() returns this copy directly; the per-operation handlers
/// discard it with `_ =` since they mutate `response` in place.
fn errorResponse(self: *Self, response: *http.Response, err_type: types.DynamoDBErrorType, message: []const u8) http.Response {
    // Map DynamoDB error types onto HTTP status codes.
    response.setStatus(switch (err_type) {
        .ResourceNotFoundException => .not_found,
        .ResourceInUseException => .conflict,
        .ValidationException => .bad_request,
        else => .internal_server_error,
    });

    const body = err_type.toErrorResponse(message, self.allocator) catch return response.*;
    response.setBody(body) catch {};
    // NOTE(review): body is freed immediately after setBody — safe only if
    // setBody copies the slice; confirm against http.Response's contract.
    self.allocator.free(body);
    return response.*;
}
|
||||
};
|
||||
|
||||
/// Extract the string value of a top-level "key":"value" pair, or null if
/// the key is absent. Returns a slice into `json_data` (no allocation);
/// escape sequences are NOT decoded — the raw bytes between the quotes are
/// returned. Keys longer than ~250 bytes return null.
fn extractJsonString(json_data: []const u8, key: []const u8) ?[]const u8 {
    // Search for "key":"value" pattern
    var search_buf: [256]u8 = undefined;
    const search = std.fmt.bufPrint(&search_buf, "\"{s}\":\"", .{key}) catch return null;

    const start = std.mem.indexOf(u8, json_data, search) orelse return null;
    const value_start = start + search.len;

    // BUGFIX: skip backslash-escaped characters so an embedded \" does not
    // prematurely terminate the value (previously the value was truncated at
    // the first quote, escaped or not).
    var i = value_start;
    while (i < json_data.len) : (i += 1) {
        const c = json_data[i];
        if (c == '\\') {
            i += 1; // skip the escaped character
        } else if (c == '"') {
            return json_data[value_start..i];
        }
    }
    return null; // unterminated string
}
|
||||
|
||||
// Global handler for use with http.Server. The http layer's callback has no
// user-data slot, so the ApiHandler instance is stashed in a module global.
var global_handler: ?*ApiHandler = null;

/// Register the ApiHandler that httpHandler dispatches to. Must be called
/// before the server starts accepting requests.
pub fn setGlobalHandler(handler: *ApiHandler) void {
    global_handler = handler;
}

/// http.Server entry point: forwards to the registered ApiHandler, or
/// answers 500 if none has been registered yet.
pub fn httpHandler(request: *const http.Request, allocator: std.mem.Allocator) http.Response {
    const handler = global_handler orelse {
        var response = http.Response.init(allocator);
        response.setStatus(.internal_server_error);
        response.setBody("{\"error\":\"Handler not initialized\"}") catch {};
        return response;
    };
    return handler.handle(request);
}
|
||||
387
src/dynamodb/storage.zig
Normal file
387
src/dynamodb/storage.zig
Normal file
@@ -0,0 +1,387 @@
|
||||
/// Storage engine mapping DynamoDB operations to RocksDB
|
||||
const std = @import("std");
|
||||
const rocksdb = @import("../rocksdb.zig");
|
||||
const types = @import("types.zig");
|
||||
|
||||
/// Errors surfaced by the storage engine to the API layer.
pub const StorageError = error{
    TableNotFound,
    TableAlreadyExists,
    ItemNotFound,
    // Key JSON did not contain a usable "pk"/"PK" string value.
    InvalidKey,
    SerializationError,
    // Any failure reported by the underlying RocksDB binding.
    RocksDBError,
    OutOfMemory,
};
|
||||
|
||||
/// Key prefixes for different data types in RocksDB. These strings define
/// the on-disk key format — changing any of them orphans existing data.
const KeyPrefix = struct {
    /// Table metadata: _meta:{table_name}
    const meta = "_meta:";
    /// Item data: _data:{table_name}:{partition_key}[:{sort_key}]
    const data = "_data:";
    /// Global secondary index: _gsi:{table_name}:{index_name}:{pk}:{sk} (reserved, unused so far)
    const gsi = "_gsi:";
    /// Local secondary index: _lsi:{table_name}:{index_name}:{pk}:{sk} (reserved, unused so far)
    const lsi = "_lsi:";
};
|
||||
|
||||
pub const StorageEngine = struct {
|
||||
db: rocksdb.DB,
|
||||
allocator: std.mem.Allocator,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
/// Open (or create) the RocksDB database at `data_dir` and wrap it in a
/// StorageEngine. Call deinit() to release the database handle.
pub fn init(allocator: std.mem.Allocator, data_dir: [*:0]const u8) !Self {
    // Second argument: create the database if it does not exist.
    const db = rocksdb.DB.open(data_dir, true) catch return StorageError.RocksDBError;
    return .{
        .allocator = allocator,
        .db = db,
    };
}
|
||||
|
||||
/// Close the underlying RocksDB handle. The engine must not be used after
/// this returns.
pub fn deinit(self: *Self) void {
    self.db.close();
}
|
||||
|
||||
// === Table Operations ===
|
||||
|
||||
/// Create a table by writing its metadata under "_meta:<name>". Fails with
/// TableAlreadyExists when a metadata record is already present.
/// NOTE(review): the returned description borrows table_name/key_schema/
/// attribute_definitions from the caller's arguments — callers must keep
/// them alive for as long as the description is used.
/// NOTE(review): the exists-check and the put are not atomic; concurrent
/// creators could race. Confirm whether single-threaded use is assumed.
pub fn createTable(self: *Self, table_name: []const u8, key_schema: []const types.KeySchemaElement, attribute_definitions: []const types.AttributeDefinition) StorageError!types.TableDescription {
    // Check if table already exists
    const meta_key = try self.buildMetaKey(table_name);
    defer self.allocator.free(meta_key);

    const existing = self.db.get(self.allocator, meta_key) catch return StorageError.RocksDBError;
    if (existing) |e| {
        self.allocator.free(e);
        return StorageError.TableAlreadyExists;
    }

    // Create table metadata
    const now = std.time.timestamp();
    const desc = types.TableDescription{
        .table_name = table_name,
        .key_schema = key_schema,
        .attribute_definitions = attribute_definitions,
        .table_status = .ACTIVE,
        .creation_date_time = now,
        .item_count = 0,
        .table_size_bytes = 0,
    };

    // Serialize and store
    const meta_value = try self.serializeTableMetadata(desc);
    defer self.allocator.free(meta_value);

    self.db.put(meta_key, meta_value) catch return StorageError.RocksDBError;
    return desc;
}
|
||||
|
||||
/// Delete a table: its metadata record and every item under its data
/// prefix, applied as a single atomic write batch.
pub fn deleteTable(self: *Self, table_name: []const u8) StorageError!void {
    const meta_key = try self.buildMetaKey(table_name);
    defer self.allocator.free(meta_key);

    // Verify table exists
    const existing = self.db.get(self.allocator, meta_key) catch return StorageError.RocksDBError;
    if (existing == null) return StorageError.TableNotFound;
    self.allocator.free(existing.?);

    // Delete all items with this table's prefix
    const data_prefix = try self.buildDataPrefix(table_name);
    defer self.allocator.free(data_prefix);

    var batch = rocksdb.WriteBatch.init() orelse return StorageError.RocksDBError;
    defer batch.deinit();

    // Scan and delete all matching keys
    var iter = rocksdb.Iterator.init(&self.db) orelse return StorageError.RocksDBError;
    defer iter.deinit();

    iter.seek(data_prefix);
    while (iter.valid()) {
        const key = iter.key() orelse break;
        if (!std.mem.startsWith(u8, key, data_prefix)) break;
        // NOTE(review): assumes batch.delete copies `key`; the iterator's key
        // buffer may be invalidated by iter.next() — confirm in the rocksdb
        // binding (the C API's writebatch_delete does copy).
        batch.delete(key);
        iter.next();
    }

    // Delete metadata
    batch.delete(meta_key);

    batch.write(&self.db) catch return StorageError.RocksDBError;
}
|
||||
|
||||
/// Load and deserialize the "_meta:<name>" record for a table.
/// Returns TableNotFound when no metadata record exists.
pub fn describeTable(self: *Self, table_name: []const u8) StorageError!types.TableDescription {
    const meta_key = try self.buildMetaKey(table_name);
    defer self.allocator.free(meta_key);

    const meta_value = self.db.get(self.allocator, meta_key) catch return StorageError.RocksDBError;
    if (meta_value == null) return StorageError.TableNotFound;
    // NOTE(review): meta_value is freed when this function returns; if the
    // deserialized description slices into it instead of duplicating, the
    // returned table_name dangles — verify deserializeTableMetadata copies.
    defer self.allocator.free(meta_value.?);

    return self.deserializeTableMetadata(meta_value.?);
}
|
||||
|
||||
/// List all table names by scanning the "_meta:" prefix.
/// Caller owns the returned slice and each name within it.
pub fn listTables(self: *Self) StorageError![][]const u8 {
    var names = std.ArrayList([]const u8){};
    errdefer {
        for (names.items) |n| self.allocator.free(n);
        names.deinit(self.allocator);
    }

    var iter = rocksdb.Iterator.init(&self.db) orelse return StorageError.RocksDBError;
    defer iter.deinit();

    iter.seek(KeyPrefix.meta);
    while (iter.valid()) : (iter.next()) {
        const key = iter.key() orelse break;
        if (!std.mem.startsWith(u8, key, KeyPrefix.meta)) break;

        // The table name is whatever follows the metadata prefix.
        const name = self.allocator.dupe(u8, key[KeyPrefix.meta.len..]) catch return StorageError.OutOfMemory;
        names.append(self.allocator, name) catch return StorageError.OutOfMemory;
    }

    return names.toOwnedSlice(self.allocator) catch return StorageError.OutOfMemory;
}
|
||||
|
||||
// === Item Operations ===
|
||||
|
||||
/// Store the raw item JSON verbatim under its derived key.
/// Returns TableNotFound when the table's metadata record is absent.
pub fn putItem(self: *Self, table_name: []const u8, item_json: []const u8) StorageError!void {
    // Verify table exists
    const meta_key = try self.buildMetaKey(table_name);
    defer self.allocator.free(meta_key);

    const meta = self.db.get(self.allocator, meta_key) catch return StorageError.RocksDBError;
    if (meta == null) return StorageError.TableNotFound;
    defer self.allocator.free(meta.?);

    // Extract key from item (simplified - assumes key is extractable from JSON)
    // NOTE(review): extractKeyFromItem passes the WHOLE item JSON to
    // buildItemKey, which looks for a top-level "pk":"..." STRING pair. Items
    // in real DynamoDB wire format ({"pk":{"S":"v"}}) would fail with
    // InvalidKey — confirm the item encoding this server expects.
    const item_key = try self.extractKeyFromItem(table_name, item_json);
    defer self.allocator.free(item_key);

    self.db.put(item_key, item_json) catch return StorageError.RocksDBError;
}
|
||||
|
||||
/// Fetch one item by key. Returns null when the item does not exist, or
/// TableNotFound when the table itself does not exist.
/// Caller owns the returned slice.
pub fn getItem(self: *Self, table_name: []const u8, key_json: []const u8) StorageError!?[]u8 {
    // BUGFIX: verify the table exists, consistent with putItem. Previously
    // TableNotFound was never produced here, even though the API handler's
    // GetItem error switch explicitly handles it.
    const meta_key = try self.buildMetaKey(table_name);
    defer self.allocator.free(meta_key);

    const meta = self.db.get(self.allocator, meta_key) catch return StorageError.RocksDBError;
    if (meta == null) return StorageError.TableNotFound;
    self.allocator.free(meta.?);

    const item_key = try self.buildItemKey(table_name, key_json);
    defer self.allocator.free(item_key);

    return self.db.get(self.allocator, item_key) catch return StorageError.RocksDBError;
}
|
||||
|
||||
/// Delete one item by key. Returns TableNotFound when the table does not
/// exist; deleting a nonexistent item in an existing table is a no-op.
pub fn deleteItem(self: *Self, table_name: []const u8, key_json: []const u8) StorageError!void {
    // BUGFIX: verify the table exists, consistent with putItem. Previously
    // TableNotFound was never produced here, even though the API handler's
    // DeleteItem error switch explicitly handles it.
    const meta_key = try self.buildMetaKey(table_name);
    defer self.allocator.free(meta_key);

    const meta = self.db.get(self.allocator, meta_key) catch return StorageError.RocksDBError;
    if (meta == null) return StorageError.TableNotFound;
    self.allocator.free(meta.?);

    const item_key = try self.buildItemKey(table_name, key_json);
    defer self.allocator.free(item_key);

    self.db.delete(item_key) catch return StorageError.RocksDBError;
}
|
||||
|
||||
/// Return up to `limit` raw item JSON values for a table (all items when
/// limit is null). Caller owns the returned slice and each element.
/// NOTE(review): does not verify the table exists — scanning an unknown
/// table yields an empty result rather than TableNotFound.
pub fn scan(self: *Self, table_name: []const u8, limit: ?usize) StorageError![][]const u8 {
    const data_prefix = try self.buildDataPrefix(table_name);
    defer self.allocator.free(data_prefix);

    var results = std.ArrayList([]const u8){};
    errdefer {
        for (results.items) |item| self.allocator.free(item);
        results.deinit(self.allocator);
    }

    var iter = rocksdb.Iterator.init(&self.db) orelse return StorageError.RocksDBError;
    defer iter.deinit();

    const max_items = limit orelse std.math.maxInt(usize);

    iter.seek(data_prefix);
    while (iter.valid() and results.items.len < max_items) : (iter.next()) {
        const key = iter.key() orelse break;
        if (!std.mem.startsWith(u8, key, data_prefix)) break;

        const value = iter.value() orelse break;
        const copy = self.allocator.dupe(u8, value) catch return StorageError.OutOfMemory;
        results.append(self.allocator, copy) catch return StorageError.OutOfMemory;
    }

    return results.toOwnedSlice(self.allocator) catch return StorageError.OutOfMemory;
}
|
||||
|
||||
/// Return up to `limit` raw item JSON values whose key falls under the
/// partition prefix "_data:<table>:<pk>". Caller owns the slice and items.
/// NOTE(review): the prefix has no trailing separator, so partition key
/// "user1" also matches items of "user12" — see buildPartitionPrefix.
/// NOTE(review): does not verify the table exists — querying an unknown
/// table yields an empty result rather than TableNotFound.
pub fn query(self: *Self, table_name: []const u8, partition_key: []const u8, limit: ?usize) StorageError![][]const u8 {
    // Build prefix for this partition
    const prefix = try self.buildPartitionPrefix(table_name, partition_key);
    defer self.allocator.free(prefix);

    var items = std.ArrayList([]const u8){};
    errdefer {
        for (items.items) |item| self.allocator.free(item);
        items.deinit(self.allocator);
    }

    var iter = rocksdb.Iterator.init(&self.db) orelse return StorageError.RocksDBError;
    defer iter.deinit();

    var count: usize = 0;
    const max_items = limit orelse std.math.maxInt(usize);

    iter.seek(prefix);
    while (iter.valid() and count < max_items) {
        const key = iter.key() orelse break;
        if (!std.mem.startsWith(u8, key, prefix)) break;

        const value = iter.value() orelse break;
        const owned_value = self.allocator.dupe(u8, value) catch return StorageError.OutOfMemory;
        items.append(self.allocator, owned_value) catch return StorageError.OutOfMemory;

        count += 1;
        iter.next();
    }

    return items.toOwnedSlice(self.allocator) catch return StorageError.OutOfMemory;
}
|
||||
|
||||
// === Key Building Helpers ===
|
||||
|
||||
/// Build the metadata key "_meta:<table>". Caller owns the returned slice.
fn buildMetaKey(self: *Self, table_name: []const u8) StorageError![]u8 {
    return std.mem.concat(self.allocator, u8, &.{ KeyPrefix.meta, table_name }) catch return StorageError.OutOfMemory;
}
|
||||
|
||||
/// Build the item-key prefix "_data:<table>:". Caller owns the result.
fn buildDataPrefix(self: *Self, table_name: []const u8) StorageError![]u8 {
    return std.mem.concat(self.allocator, u8, &.{ KeyPrefix.data, table_name, ":" }) catch return StorageError.OutOfMemory;
}
|
||||
|
||||
/// Build the seek prefix "_data:<table>:<pk>" for one partition.
/// NOTE(review): no trailing separator, so pk "user1" also matches
/// "user12". Appending ':' would fix that but would skip items stored
/// without a sort key (whose full key is exactly this prefix) — a
/// key-format decision is needed before changing this.
fn buildPartitionPrefix(self: *Self, table_name: []const u8, partition_key: []const u8) StorageError![]u8 {
    return std.fmt.allocPrint(self.allocator, "{s}{s}:{s}", .{ KeyPrefix.data, table_name, partition_key }) catch return StorageError.OutOfMemory;
}
|
||||
|
||||
/// Build an item's storage key from its key JSON:
/// "_data:<table>:<pk>" or "_data:<table>:<pk>:<sk>".
/// Accepts "pk"/"PK" and "sk"/"SK" as top-level string values; returns
/// InvalidKey when no partition key is present. Caller owns the result.
fn buildItemKey(self: *Self, table_name: []const u8, key_json: []const u8) StorageError![]u8 {
    const pk = extractStringValue(key_json, "pk") orelse
        extractStringValue(key_json, "PK") orelse
        return StorageError.InvalidKey;

    const maybe_sk = extractStringValue(key_json, "sk") orelse extractStringValue(key_json, "SK");

    const result = if (maybe_sk) |sort_key|
        std.fmt.allocPrint(self.allocator, "{s}{s}:{s}:{s}", .{ KeyPrefix.data, table_name, pk, sort_key })
    else
        std.fmt.allocPrint(self.allocator, "{s}{s}:{s}", .{ KeyPrefix.data, table_name, pk });

    return result catch return StorageError.OutOfMemory;
}
|
||||
|
||||
/// Derive the storage key for an item by scanning the item JSON itself for
/// "pk"/"sk" string values (the whole item JSON is forwarded as if it were
/// key JSON). Caller owns the returned key.
/// NOTE(review): this only works for items encoded as flat string pairs
/// ("pk":"v"); DynamoDB wire-format items ({"pk":{"S":"v"}}) fail with
/// InvalidKey — confirm the expected item encoding.
fn extractKeyFromItem(self: *Self, table_name: []const u8, item_json: []const u8) StorageError![]u8 {
    return self.buildItemKey(table_name, item_json);
}
|
||||
|
||||
// === Serialization Helpers ===
|
||||
|
||||
/// Serialize a TableDescription to the JSON form stored under "_meta:".
/// Caller owns the returned slice. The exact byte layout is the on-disk
/// metadata format — do not change it without migrating existing data.
fn serializeTableMetadata(self: *Self, desc: types.TableDescription) StorageError![]u8 {
    var buf = std.ArrayList(u8){};
    errdefer buf.deinit(self.allocator);
    const writer = buf.writer(self.allocator);

    // Scalar fields, then KeySchema array.
    writer.print("{{\"TableName\":\"{s}\",\"TableStatus\":\"{s}\",\"CreationDateTime\":{d},\"ItemCount\":{d},\"TableSizeBytes\":{d},\"KeySchema\":[", .{
        desc.table_name,
        desc.table_status.toString(),
        desc.creation_date_time,
        desc.item_count,
        desc.table_size_bytes,
    }) catch return StorageError.SerializationError;

    for (desc.key_schema, 0..) |ks, i| {
        if (i > 0) writer.writeByte(',') catch return StorageError.SerializationError;
        writer.print("{{\"AttributeName\":\"{s}\",\"KeyType\":\"{s}\"}}", .{
            ks.attribute_name,
            ks.key_type.toString(),
        }) catch return StorageError.SerializationError;
    }

    writer.writeAll("],\"AttributeDefinitions\":[") catch return StorageError.SerializationError;

    for (desc.attribute_definitions, 0..) |ad, i| {
        if (i > 0) writer.writeByte(',') catch return StorageError.SerializationError;
        writer.print("{{\"AttributeName\":\"{s}\",\"AttributeType\":\"{s}\"}}", .{
            ad.attribute_name,
            ad.attribute_type.toString(),
        }) catch return StorageError.SerializationError;
    }

    writer.writeAll("]}") catch return StorageError.SerializationError;

    return buf.toOwnedSlice(self.allocator) catch return StorageError.OutOfMemory;
}
|
||||
|
||||
/// Deserialize table metadata from its stored JSON form (simplified — a
/// production build should use std.json). Key schema and attribute
/// definitions are not reconstructed. Caller owns .table_name.
fn deserializeTableMetadata(self: *Self, data: []const u8) StorageError!types.TableDescription {
    // BUGFIX: the table name must be duplicated. `data` is freed by the
    // caller (describeTable) immediately after this returns, so slicing
    // into it made the returned table_name a dangling pointer.
    // NOTE(review): existing callers do not free the name — a small leak,
    // but memory-safe, unlike the previous use-after-free.
    const name_slice = extractStringValue(data, "TableName") orelse return StorageError.SerializationError;
    const table_name = self.allocator.dupe(u8, name_slice) catch return StorageError.OutOfMemory;
    errdefer self.allocator.free(table_name);

    // Map every status string this module writes; unknown values default to
    // ACTIVE (matching the previous behavior).
    const status_str = extractStringValue(data, "TableStatus") orelse "ACTIVE";
    const status: types.TableStatus = if (std.mem.eql(u8, status_str, "CREATING"))
        .CREATING
    else if (std.mem.eql(u8, status_str, "UPDATING"))
        .UPDATING
    else if (std.mem.eql(u8, status_str, "DELETING"))
        .DELETING
    else
        .ACTIVE;

    return types.TableDescription{
        .table_name = table_name,
        .key_schema = &[_]types.KeySchemaElement{},
        .attribute_definitions = &[_]types.AttributeDefinition{},
        .table_status = status,
        .creation_date_time = 0,
        .item_count = 0,
        .table_size_bytes = 0,
    };
}
|
||||
};
|
||||
|
||||
/// Simple JSON string value extraction (production code should use std.json).
/// Returns a slice into `json_data` for the first top-level "key":"value"
/// pair, or null when absent. Escape sequences are NOT decoded; keys longer
/// than ~250 bytes return null.
fn extractStringValue(json_data: []const u8, key: []const u8) ?[]const u8 {
    // Look for "key":"value" pattern
    var search_buf: [256]u8 = undefined;
    const search_pattern = std.fmt.bufPrint(&search_buf, "\"{s}\":\"", .{key}) catch return null;
    const start = std.mem.indexOf(u8, json_data, search_pattern) orelse return null;
    const value_start = start + search_pattern.len;

    // BUGFIX: skip backslash-escaped characters so an embedded \" does not
    // prematurely terminate the value (previously the value was truncated at
    // the first quote, escaped or not).
    var i = value_start;
    while (i < json_data.len) : (i += 1) {
        const c = json_data[i];
        if (c == '\\') {
            i += 1; // skip the escaped character
        } else if (c == '"') {
            return json_data[value_start..i];
        }
    }
    return null; // unterminated string
}
|
||||
|
||||
test "storage basic operations" {
    const allocator = std.testing.allocator;

    // Fresh database directory per run; removed afterwards.
    const path = "/tmp/test_storage";
    defer std.fs.deleteTreeAbsolute(path) catch {};

    var engine = try StorageEngine.init(allocator, path);
    defer engine.deinit();

    // Create a table with a single string HASH key.
    const key_schema = [_]types.KeySchemaElement{
        .{ .attribute_name = "pk", .key_type = .HASH },
    };
    const attr_defs = [_]types.AttributeDefinition{
        .{ .attribute_name = "pk", .attribute_type = .S },
    };

    _ = try engine.createTable("TestTable", &key_schema, &attr_defs);

    // List tables
    const tables = try engine.listTables();
    defer {
        for (tables) |t| allocator.free(t);
        allocator.free(tables);
    }
    try std.testing.expectEqual(@as(usize, 1), tables.len);
    try std.testing.expectEqualStrings("TestTable", tables[0]);

    // Delete table
    try engine.deleteTable("TestTable");

    // Verify deleted. BUGFIX: free each name as well as the outer slice —
    // previously only the slice was freed, leaking entries if any table
    // had remained.
    const tables2 = try engine.listTables();
    defer {
        for (tables2) |t| allocator.free(t);
        allocator.free(tables2);
    }
    try std.testing.expectEqual(@as(usize, 0), tables2.len);
}
|
||||
250
src/dynamodb/types.zig
Normal file
250
src/dynamodb/types.zig
Normal file
@@ -0,0 +1,250 @@
|
||||
/// DynamoDB protocol types and serialization
|
||||
const std = @import("std");
|
||||
|
||||
/// DynamoDB AttributeValue - the core data type.
/// Variant names match the single-letter type keys of the DynamoDB JSON
/// wire format (see json.serializeAttributeValue below for the encoding).
pub const AttributeValue = union(enum) {
    S: []const u8, // String
    N: []const u8, // Number (stored as string, per the DynamoDB protocol)
    B: []const u8, // Binary (base64)
    SS: []const []const u8, // String Set
    NS: []const []const u8, // Number Set
    BS: []const []const u8, // Binary Set
    M: std.StringHashMap(AttributeValue), // Map (nested attribute values)
    L: []const AttributeValue, // List (heterogeneous, recursive)
    NULL: bool, // Null
    BOOL: bool, // Boolean
};
|
||||
|
||||
/// An item: map from attribute name to its AttributeValue.
/// NOTE(review): StringHashMap does not copy keys — the attribute-name
/// strings must outlive the map; verify ownership at the call sites.
pub const Item = std.StringHashMap(AttributeValue);
|
||||
|
||||
/// Key role within a table's key schema (partition vs. sort key).
pub const KeyType = enum {
    HASH,
    RANGE,

    /// Wire-format name of this key type. The enum field names match the
    /// DynamoDB strings exactly, so the tag name is returned directly.
    pub fn toString(self: KeyType) []const u8 {
        return @tagName(self);
    }

    /// Parse a wire-format key type name; null when unrecognized.
    pub fn fromString(s: []const u8) ?KeyType {
        return std.meta.stringToEnum(KeyType, s);
    }
};
|
||||
|
||||
/// Scalar attribute type usable in key schemas: String, Number, or Binary.
pub const ScalarAttributeType = enum {
    S,
    N,
    B,

    /// Wire-format name (the enum tag name itself: "S", "N", or "B").
    pub fn toString(self: ScalarAttributeType) []const u8 {
        return @tagName(self);
    }

    /// Parse a wire-format scalar type name; null when unrecognized.
    pub fn fromString(s: []const u8) ?ScalarAttributeType {
        return std.meta.stringToEnum(ScalarAttributeType, s);
    }
};
|
||||
|
||||
/// One element of a table's key schema: names the attribute that serves as
/// the HASH (partition) or RANGE (sort) key.
pub const KeySchemaElement = struct {
    attribute_name: []const u8,
    key_type: KeyType,
};
|
||||
|
||||
/// Declares the scalar type (S/N/B) of an attribute referenced by a key schema.
pub const AttributeDefinition = struct {
    attribute_name: []const u8,
    attribute_type: ScalarAttributeType,
};
|
||||
|
||||
/// Lifecycle state of a table, mirroring the DynamoDB TableStatus values.
pub const TableStatus = enum {
    CREATING,
    UPDATING,
    DELETING,
    ACTIVE,
    INACCESSIBLE_ENCRYPTION_CREDENTIALS,
    ARCHIVING,
    ARCHIVED,

    /// Wire-format name of this status. The enum field names match the
    /// DynamoDB status strings exactly, so the tag name is returned directly.
    pub fn toString(self: TableStatus) []const u8 {
        return @tagName(self);
    }
};
|
||||
|
||||
/// Table metadata as reported in CreateTable/DescribeTable responses.
pub const TableDescription = struct {
    table_name: []const u8,
    key_schema: []const KeySchemaElement,
    attribute_definitions: []const AttributeDefinition,
    table_status: TableStatus,
    creation_date_time: i64, // Unix timestamp; seconds vs. milliseconds not shown here — confirm against the producer
    item_count: u64,
    table_size_bytes: u64,
};
|
||||
|
||||
/// DynamoDB operation types parsed from X-Amz-Target header
pub const Operation = enum {
    CreateTable,
    DeleteTable,
    DescribeTable,
    ListTables,
    UpdateTable,
    PutItem,
    GetItem,
    DeleteItem,
    UpdateItem,
    Query,
    Scan,
    BatchGetItem,
    BatchWriteItem,
    TransactGetItems,
    TransactWriteItems,
    Unknown,

    /// Parse an `X-Amz-Target` header value of the form
    /// `DynamoDB_20120810.OperationName`. Returns .Unknown when the prefix
    /// is missing or the operation name is not recognized.
    pub fn fromTarget(target: []const u8) Operation {
        const prefix = "DynamoDB_20120810.";
        if (!std.mem.startsWith(u8, target, prefix)) return .Unknown;

        // The enum field names match the wire operation names exactly, so
        // the generic name->enum lookup replaces a hand-written table.
        return std.meta.stringToEnum(Operation, target[prefix.len..]) orelse .Unknown;
    }
};
|
||||
|
||||
/// DynamoDB error types, rendered into the `__type` field of error responses.
pub const DynamoDBErrorType = enum {
    ValidationException,
    ResourceNotFoundException,
    ResourceInUseException,
    ConditionalCheckFailedException,
    ProvisionedThroughputExceededException,
    ItemCollectionSizeLimitExceededException,
    InternalServerError,
    SerializationException,

    /// Render the AWS-style JSON error body. Caller owns the returned slice.
    /// Double quotes and backslashes in `message` are escaped so the output
    /// stays valid JSON (the previous version interpolated the message raw,
    /// which broke the body for any message containing a quote).
    pub fn toErrorResponse(self: DynamoDBErrorType, message: []const u8, allocator: std.mem.Allocator) ![]u8 {
        // The enum field names match the AWS exception names exactly.
        const namespace = "com.amazonaws.dynamodb.v20120810#";
        const type_name = @tagName(self);

        const head = "{\"__type\":\"";
        const mid = "\",\"message\":\"";
        const tail = "\"}";

        // First pass: measure the escaped message length.
        var esc_len: usize = 0;
        for (message) |c| {
            esc_len += if (c == '"' or c == '\\') 2 else 1;
        }

        const buf = try allocator.alloc(u8, head.len + namespace.len + type_name.len + mid.len + esc_len + tail.len);
        var idx: usize = 0;
        @memcpy(buf[idx..][0..head.len], head);
        idx += head.len;
        @memcpy(buf[idx..][0..namespace.len], namespace);
        idx += namespace.len;
        @memcpy(buf[idx..][0..type_name.len], type_name);
        idx += type_name.len;
        @memcpy(buf[idx..][0..mid.len], mid);
        idx += mid.len;
        // Second pass: copy the message, escaping `"` and `\`.
        for (message) |c| {
            if (c == '"' or c == '\\') {
                buf[idx] = '\\';
                idx += 1;
            }
            buf[idx] = c;
            idx += 1;
        }
        @memcpy(buf[idx..][0..tail.len], tail);
        return buf;
    }
};
|
||||
|
||||
// JSON serialization helpers for DynamoDB format
pub const json = struct {
    /// Serialize AttributeValue to DynamoDB JSON format.
    /// NOTE(review): string payloads are written verbatim — embedded quotes
    /// or backslashes would produce invalid JSON; confirm inputs are
    /// pre-sanitized or switch to std.json for proper escaping.
    pub fn serializeAttributeValue(writer: anytype, value: AttributeValue) !void {
        switch (value) {
            // Scalars: a one-key object, e.g. {"S":"hello"}.
            .S => |s| try writer.print("{{\"S\":\"{s}\"}}", .{s}),
            .N => |n| try writer.print("{{\"N\":\"{s}\"}}", .{n}),
            .B => |b| try writer.print("{{\"B\":\"{s}\"}}", .{b}),
            .BOOL => |b| try writer.print("{{\"BOOL\":{}}}", .{b}),
            .NULL => try writer.writeAll("{\"NULL\":true}"),
            // Sets: an array of strings under the set-type key.
            .SS => |ss| {
                try writer.writeAll("{\"SS\":[");
                for (ss, 0..) |s, i| {
                    if (i > 0) try writer.writeByte(',');
                    try writer.print("\"{s}\"", .{s});
                }
                try writer.writeAll("]}");
            },
            .NS => |ns| {
                try writer.writeAll("{\"NS\":[");
                for (ns, 0..) |n, i| {
                    if (i > 0) try writer.writeByte(',');
                    try writer.print("\"{s}\"", .{n});
                }
                try writer.writeAll("]}");
            },
            .BS => |bs| {
                try writer.writeAll("{\"BS\":[");
                for (bs, 0..) |b, i| {
                    if (i > 0) try writer.writeByte(',');
                    try writer.print("\"{s}\"", .{b});
                }
                try writer.writeAll("]}");
            },
            // List: recursively serialize each element.
            .L => |list| {
                try writer.writeAll("{\"L\":[");
                for (list, 0..) |item, i| {
                    if (i > 0) try writer.writeByte(',');
                    try serializeAttributeValue(writer, item);
                }
                try writer.writeAll("]}");
            },
            // Map: recursively serialize each entry. Iteration follows the
            // hash map, so the output key order is unspecified.
            .M => |map| {
                try writer.writeAll("{\"M\":{");
                var first = true;
                var iter = map.iterator();
                while (iter.next()) |entry| {
                    if (!first) try writer.writeByte(',');
                    first = false;
                    try writer.print("\"{s}\":", .{entry.key_ptr.*});
                    try serializeAttributeValue(writer, entry.value_ptr.*);
                }
                try writer.writeAll("}}");
            },
        }
    }

    /// Serialize an Item (map of attribute name to AttributeValue).
    /// Emits {"name":{...},...}; entry order is unspecified (hash map order).
    pub fn serializeItem(writer: anytype, item: Item) !void {
        try writer.writeByte('{');
        var first = true;
        var iter = item.iterator();
        while (iter.next()) |entry| {
            if (!first) try writer.writeByte(',');
            first = false;
            try writer.print("\"{s}\":", .{entry.key_ptr.*});
            try serializeAttributeValue(writer, entry.value_ptr.*);
        }
        try writer.writeByte('}');
    }
};
|
||||
|
||||
// Table-driven check of X-Amz-Target parsing, covering a valid prefix with
// known operations and an input with no prefix at all.
test "operation from target" {
    const cases = .{
        .{ "DynamoDB_20120810.CreateTable", Operation.CreateTable },
        .{ "DynamoDB_20120810.PutItem", Operation.PutItem },
        .{ "Invalid", Operation.Unknown },
    };
    inline for (cases) |case| {
        try std.testing.expectEqual(case[1], Operation.fromTarget(case[0]));
    }
}
|
||||
Reference in New Issue
Block a user