Compare commits
28 Commits: 06ed6a2c97...master

| SHA1 |
|---|
| 5ee3df86f1 |
| 47eefd0fe5 |
| 443562dfb6 |
| 9cf54e1b9f |
| a7f2a5ab59 |
| 6bc1a03347 |
| 178b38fe18 |
| b92dc61b08 |
| 64da021148 |
| 12ba2e57d7 |
| a5a5d41e50 |
| d8a80bd728 |
| 225a1533cc |
| a6bf357228 |
| 78a4ea7a0c |
| 228b422393 |
| 96de080d10 |
| a77676bbc7 |
| 4404f2796d |
| 26281bc16d |
| 29fe8a60c3 |
| f8b0b1c3ae |
| 089ef39bd9 |
| 9518eb255e |
| f0d3eca5cb |
| c8ada180ce |
| 4b8e424085 |
| 96896a0f97 |

ARCHITECTURE.md (409 lines changed)

@@ -1,409 +0,0 @@

## JormunDB Architecture

# !!THIS IS NO LONGER ENTIRELY ACCURATE IGNORE OR UPDATE WITH ACCURATE INFO!!

This document explains the internal architecture of JormunDB, including design decisions, storage formats, and the arena-per-request memory management pattern.

## Table of Contents

- [Overview](#overview)
- [Why Odin?](#why-odin)
- [Memory Management](#memory-management)
- [Storage Format](#storage-format)
- [Module Structure](#module-structure)
- [Request Flow](#request-flow)
- [Concurrency Model](#concurrency-model)

## Overview

JormunDB is a DynamoDB-compatible database server that speaks the DynamoDB wire protocol. It uses RocksDB for persistent storage and is written in Odin for its context-based memory management.

### Key Design Goals

1. **Zero allocation ceremony** - No explicit `defer free()` or error handling for every allocation
2. **Binary storage** - Efficient TLV encoding instead of JSON
3. **API compatibility** - Drop-in replacement for DynamoDB
4. **Performance** - RocksDB-backed with efficient key encoding

## Why Odin?

The original implementation in Zig suffered from explicit allocator threading:

```zig
// Zig version - explicit allocator everywhere
fn handleRequest(allocator: std.mem.Allocator, request: []const u8) !Response {
    const parsed = try parseJson(allocator, request);
    defer parsed.deinit(allocator);

    const item = try storage.getItem(allocator, parsed.table_name, parsed.key);
    defer if (item) |i| freeItem(allocator, i);

    const response = try serializeResponse(allocator, item);
    defer allocator.free(response);

    return response; // Wait, we deferred the free!
}
```

Odin's context allocator system eliminates this:

```odin
// Odin version - implicit context allocator
handle_request :: proc(request: []byte) -> Response {
    // All allocations use context.allocator automatically
    parsed := parse_json(request)
    item := storage_get_item(parsed.table_name, parsed.key)
    response := serialize_response(item)

    return response
    // Everything is freed when the arena is destroyed
}
```

## Memory Management

JormunDB uses a two-allocator strategy:

### 1. Arena Allocator (Request-Scoped)

Every HTTP request gets its own arena:

```odin
handle_connection :: proc(conn: net.TCP_Socket) {
    // Create an arena for this request (4MB). core:mem's fixed-buffer
    // Arena has no destroy proc of its own; freeing the backing slice
    // releases everything the arena handed out.
    backing := make([]byte, 4 * mem.Megabyte)
    defer delete(backing)

    arena: mem.Arena
    mem.arena_init(&arena, backing)

    // Set context allocator
    context.allocator = mem.arena_allocator(&arena)

    // All downstream code uses context.allocator
    request := parse_http_request(conn)  // uses arena
    response := handle_request(request)  // uses arena
    send_response(conn, response)        // uses arena

    // Backing buffer is freed here - everything cleaned up automatically
}
```

**Benefits:**
- No individual `free()` calls needed
- No `errdefer` cleanup
- No use-after-free bugs
- No memory leaks from forgotten frees
- Predictable performance (no GC pauses)

### 2. Default Allocator (Long-Lived Data)

The default allocator (`context.allocator` as it is at program start) is used for:

- Table metadata
- Table locks (`sync.RW_Mutex`)
- Engine state
- Items returned from the storage layer (copied into the request arena when needed; see the sketch below)
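
The hand-off between the two is the only place where allocators still appear explicitly. A minimal sketch of the split, assuming hypothetical `engine.allocator` and `engine.tables` fields (only `item_deep_copy` appears in the actual codebase; everything else here is illustrative):

```odin
import "core:strings"

// Sketch only: illustrates the two-allocator split, not the exact API.
register_table :: proc(engine: ^Storage_Engine, name: string) {
    // Long-lived: allocated explicitly with the engine's (default) allocator,
    // so it survives after every request arena is torn down.
    meta := new(Table_Metadata, engine.allocator)         // assumed field
    meta.name = strings.clone(name, engine.allocator)
    engine.tables[meta.name] = meta                       // assumed field
}

item_for_response :: proc(stored: Item) -> Item {
    // Request-scoped: context.allocator is the request arena here, so the
    // copy is reclaimed automatically when the arena's buffer is freed.
    return item_deep_copy(stored)
}
```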

## Storage Format

### Binary Keys (Varint-Prefixed Segments)

All keys use varint length prefixes for space efficiency:

```
Meta key:  [0x01][len][table_name]
Data key:  [0x02][len][table_name][len][pk_value][len][sk_value]?
GSI key:   [0x03][len][table_name][len][index_name][len][gsi_pk][len][gsi_sk]?
LSI key:   [0x04][len][table_name][len][index_name][len][pk][len][lsi_sk]
```

**Example Data Key:**
```
Table: "Users"
PK:    "user:123"
SK:    "profile"

Encoded:
[0x02]      // Entity type (Data)
[0x05]      // Table name length (5)
Users       // Table name bytes
[0x08]      // PK length (8)
user:123    // PK bytes
[0x07]      // SK length (7)
profile     // SK bytes
```
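
A minimal sketch of the segment encoding, assuming unsigned LEB128 varints and `core:bytes` buffers; `encode_varint` matches a helper name used in the codebase, but the signatures here are illustrative:

```odin
import "core:bytes"

// Unsigned LEB128: 7 bits per byte, high bit set while more bytes follow.
encode_varint :: proc(buf: ^bytes.Buffer, n: int) {
    v := u64(n)
    for {
        if v < 0x80 {
            bytes.buffer_write_byte(buf, u8(v))
            return
        }
        bytes.buffer_write_byte(buf, u8(v & 0x7f) | 0x80)
        v >>= 7
    }
}

// One [len][bytes] segment, as used by every key type above.
write_segment :: proc(buf: ^bytes.Buffer, segment: []byte) {
    encode_varint(buf, len(segment))
    bytes.buffer_write(buf, segment)
}

// Data key: [0x02][len][table_name][len][pk][len][sk]?
build_data_key_sketch :: proc(table_name: string, pk: []byte, sk: []byte) -> []byte {
    buf: bytes.Buffer
    bytes.buffer_write_byte(&buf, 0x02) // entity type (Data)
    write_segment(&buf, transmute([]byte)table_name)
    write_segment(&buf, pk)
    if len(sk) > 0 { // sort-key segment only when the table has one
        write_segment(&buf, sk)
    }
    return bytes.buffer_to_bytes(&buf)
}
```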

### Item Encoding (TLV Format)

Items use Tag-Length-Value encoding for space efficiency:

```
Format:
[attr_count:varint]
[name_len:varint][name:bytes][type_tag:u8][value_len:varint][value:bytes]...

Type Tags:
String = 0x01   Number = 0x02   Binary = 0x03
Bool   = 0x04   Null   = 0x05
SS     = 0x10   NS     = 0x11   BS     = 0x12
List   = 0x20   Map    = 0x21
```

**Example Item:**
```json
{
  "id": {"S": "user123"},
  "age": {"N": "30"}
}
```

Encoded as:
```
[0x02]      // 2 attributes
[0x02]      // name length (2)
id          // name bytes
[0x01]      // type tag (String)
[0x07]      // value length (7)
user123     // value bytes

[0x03]      // name length (3)
age         // name bytes
[0x02]      // type tag (Number)
[0x02]      // value length (2)
30          // value bytes (stored as string)
```
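
Encoding is then a single pass over the attribute map. A minimal sketch of the two scalar cases from the example, reusing the helpers from the key sketch above; `Item` as `map[string]Attribute_Value` with `String`/`Number` union cases is an assumption about the internal types:

```odin
// Sketch: TLV-encode an item's String/Number attributes.
// Tags follow the table above; remaining cases are elided.
encode_item_sketch :: proc(buf: ^bytes.Buffer, item: Item) -> bool {
    encode_varint(buf, len(item)) // [attr_count]
    for name, attr in item {
        write_segment(buf, transmute([]byte)name) // [name_len][name]
        if s, is_s := attr.(String); is_s {
            bytes.buffer_write_byte(buf, 0x01) // type tag: String
            write_segment(buf, transmute([]byte)string(s))
        } else if n, is_n := attr.(Number); is_n {
            bytes.buffer_write_byte(buf, 0x02) // type tag: Number
            write_segment(buf, transmute([]byte)string(n)) // stored as string
        } else {
            return false // Binary, Bool, sets, List, Map omitted in this sketch
        }
    }
    return true
}
```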

## Request Flow

```
1. HTTP POST / arrives
       ↓
2. Create arena allocator (4MB)
   Set context.allocator = arena_allocator
       ↓
3. Parse HTTP headers
   Extract X-Amz-Target → Operation
       ↓
4. Parse JSON body
   Convert DynamoDB JSON → internal types
       ↓
5. Route to handler (e.g., handle_put_item)
       ↓
6. Storage engine operation
   - Build binary key
   - Encode item to TLV
   - RocksDB put/get/delete
       ↓
7. Build response
   - Serialize item to DynamoDB JSON
   - Format HTTP response
       ↓
8. Send response
       ↓
9. Destroy arena
   All request memory freed automatically
```
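
Steps 3-5 pivot on the X-Amz-Target header. A minimal routing sketch; only `handle_put_item` is named in this document, so the other handler names and the `.UnknownOperationException` enum member are assumptions:

```odin
import "core:strings"

// Sketch: dispatch "DynamoDB_20120810.<Operation>" to a handler.
route_request :: proc(target: string, body: []byte) -> Response {
    op := strings.trim_prefix(target, "DynamoDB_20120810.")
    switch op {
    case "PutItem":    return handle_put_item(body)
    case "GetItem":    return handle_get_item(body)    // assumed name
    case "DeleteItem": return handle_delete_item(body) // assumed name
    case "Query":      return handle_query(body)       // assumed name
    case "Scan":       return handle_scan(body)        // assumed name
    }
    // .UnknownOperationException is assumed to exist in the error enum.
    return error_response(.UnknownOperationException, "Unknown operation")
}
```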

## Concurrency Model

### Table-Level RW Locks

Each table has a reader-writer lock:

```odin
Storage_Engine :: struct {
    db:                rocksdb.DB,
    table_locks:       map[string]^sync.RW_Mutex,
    table_locks_mutex: sync.Mutex,
}
```

**Read Operations** (GetItem, Query, Scan):
- Acquire shared lock
- Multiple readers can run concurrently
- Writers are blocked

**Write Operations** (PutItem, DeleteItem, UpdateItem):
- Acquire exclusive lock
- Only one writer at a time
- All readers are blocked
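
Since the lock map itself is shared state, a per-table lock has to be fetched (or lazily created) under `table_locks_mutex` before the reader or writer side is taken. A minimal sketch, assuming this get-or-create pattern (not the project's confirmed API):

```odin
import "core:sync"

// Sketch: look up (or lazily create) the table's RW lock while holding the
// map mutex, then take whichever side the operation needs.
get_table_lock :: proc(engine: ^Storage_Engine, table: string) -> ^sync.RW_Mutex {
    sync.mutex_lock(&engine.table_locks_mutex)
    defer sync.mutex_unlock(&engine.table_locks_mutex)

    if lock, ok := engine.table_locks[table]; ok {
        return lock
    }
    lock := new(sync.RW_Mutex) // long-lived: default allocator
    engine.table_locks[table] = lock
    return lock
}

// Read path: shared lock, so concurrent GetItem/Query/Scan can overlap.
with_read_lock :: proc(engine: ^Storage_Engine, table: string, body: proc(^Storage_Engine)) {
    lock := get_table_lock(engine, table)
    sync.rw_mutex_shared_lock(lock)
    defer sync.rw_mutex_shared_unlock(lock)
    body(engine)
}
```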

### Thread Safety

- RocksDB handles are thread-safe (column family-based)
- Table metadata is protected by locks
- Request arenas are thread-local (no sharing)

## Error Handling

Odin uses explicit error returns via `or_return`:

```odin
// Odin error handling
parse_json :: proc(data: []byte) -> (item: Item, ok: bool) {
    // json.parse's error value can't pass through a bool return
    // directly, so it is checked explicitly.
    parsed, err := json.parse(data)
    if err != nil {
        return
    }
    item = json_to_item(parsed) or_return
    return item, true
}

// Usage
item, item_ok := parse_json(request.body)
if !item_ok {
    return error_response(.ValidationException, "Invalid JSON")
}
```

No exceptions, no panic-recover patterns. Every error path is explicit.

## DynamoDB Wire Protocol

### Request Format

```
POST / HTTP/1.1
X-Amz-Target: DynamoDB_20120810.PutItem
Content-Type: application/x-amz-json-1.0

{
  "TableName": "Users",
  "Item": {
    "id": {"S": "user123"},
    "name": {"S": "Alice"}
  }
}
```

### Response Format

```
HTTP/1.1 200 OK
Content-Type: application/x-amz-json-1.0
x-amzn-RequestId: local-request-id

{}
```

### Error Format

```json
{
  "__type": "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException",
  "message": "Table not found"
}
```
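
Error bodies are plain string assembly over the `__type`/`message` pair; client errors go out with HTTP 400. A minimal sketch (assumes `message` needs no JSON escaping; the real `error_response` may differ):

```odin
import "core:strings"

// Sketch: build the JSON error body shown above.
format_error_body :: proc(error_type: string, message: string) -> string {
    b := strings.builder_make()
    strings.write_string(&b, `{"__type":"com.amazonaws.dynamodb.v20120810#`)
    strings.write_string(&b, error_type)
    strings.write_string(&b, `","message":"`)
    strings.write_string(&b, message) // assumed pre-escaped
    strings.write_string(&b, `"}`)
    return strings.to_string(b)
}
```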

## Performance Characteristics

### Time Complexity

| Operation | Complexity | Notes |
|-----------|-----------|-------|
| PutItem | O(log n) | RocksDB LSM tree insert |
| GetItem | O(log n) | RocksDB point lookup |
| DeleteItem | O(log n) | RocksDB deletion |
| Query | O(log n + m) | n = items in table, m = result set |
| Scan | O(n) | Full table scan |

### Space Complexity

- Binary keys: ~20-100 bytes (vs 50-200 bytes as JSON)
- Binary items: ~30% smaller than JSON
- Varint encoding saves space on small integers

### Benchmarks (Expected)

Based on Zig version performance:

```
Operation           Throughput    Latency (p50)
PutItem             ~5,000/sec    ~0.2ms
GetItem             ~7,000/sec    ~0.14ms
Query (1 item)      ~8,000/sec    ~0.12ms
Scan (1000 items)   ~20/sec       ~50ms
```

## Future Enhancements

### Planned Features

1. **UpdateExpression** - SET/REMOVE/ADD/DELETE operations
2. **FilterExpression** - Post-query filtering
3. **ProjectionExpression** - Return a subset of attributes
4. **Global Secondary Indexes** - Query by non-key attributes
5. **Local Secondary Indexes** - Alternate sort keys
6. **BatchWriteItem** - Batch mutations
7. **BatchGetItem** - Batch reads
8. **Transactions** - ACID multi-item operations

### Optimization Opportunities

1. **Connection pooling** - Reuse HTTP connections
2. **Bloom filters** - Faster negative lookups
3. **Compression** - LZ4/Zstd on large items
4. **Caching layer** - Hot item cache
5. **Parallel scan** - Segment-based scanning

## Debugging

### Enable Verbose Logging

```bash
make run VERBOSE=1
```

### Inspect RocksDB

```bash
# Use the ldb tool to inspect the database
ldb --db=./data scan
ldb --db=./data get <key_hex>
```

### Memory Profiling

Odin's tracking allocator can detect leaks:

```odin
when ODIN_DEBUG {
    track: mem.Tracking_Allocator
    mem.tracking_allocator_init(&track, context.allocator)
    context.allocator = mem.tracking_allocator(&track)

    defer {
        for _, leak in track.allocation_map {
            fmt.printfln("Leaked %v bytes at %v", leak.size, leak.location)
        }
    }
}
```

## Migration from Zig Version

The Zig version (ZynamoDB) used the same binary storage format, so existing RocksDB databases can be read by JormunDB without migration.

### Compatibility

- ✅ Binary key format (byte-compatible)
- ✅ Binary item format (byte-compatible)
- ✅ Table metadata (JSON, compatible)
- ✅ HTTP wire protocol (identical)

### Breaking Changes

None - JormunDB can open ZynamoDB databases directly.

---

## Contributing

When contributing to JormunDB:

1. **Use the context allocator** - All request-scoped allocations should use `context.allocator`
2. **Avoid manual frees** - Let the arena handle it
3. **Long-lived data** - Use the default allocator explicitly
4. **Test thoroughly** - Run `make test` before committing
5. **Format code** - Run `make fmt` before committing

## References

- [Odin Language](https://odin-lang.org/)
- [RocksDB Wiki](https://github.com/facebook/rocksdb/wiki)
- [DynamoDB API Reference](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/)
- [Varint Encoding](https://developers.google.com/protocol-buffers/docs/encoding#varints)

Makefile (40 lines changed)

@@ -1,4 +1,4 @@
.PHONY: all build release run test clean fmt help install sdk_test
.PHONY: all build release run test clean fmt help install

# Project configuration
PROJECT_NAME := jormundb

@@ -147,42 +147,6 @@ check-deps:
	@pkg-config --exists rocksdb || (echo "$(RED)✗ RocksDB not found$(NC)" && exit 1)
	@echo "$(GREEN)✓ All dependencies found$(NC)"

# AWS CLI test commands
aws-test: run &
	@sleep 2
	@echo "$(BLUE)Testing with AWS CLI...$(NC)"
	@echo "\n$(YELLOW)Creating table...$(NC)"
	@aws dynamodb create-table \
		--endpoint-url http://localhost:$(PORT) \
		--table-name TestTable \
		--key-schema AttributeName=pk,KeyType=HASH \
		--attribute-definitions AttributeName=pk,AttributeType=S \
		--billing-mode PAY_PER_REQUEST || true
	@echo "\n$(YELLOW)Listing tables...$(NC)"
	@aws dynamodb list-tables --endpoint-url http://localhost:$(PORT)
	@echo "\n$(YELLOW)Putting item...$(NC)"
	@aws dynamodb put-item \
		--endpoint-url http://localhost:$(PORT) \
		--table-name TestTable \
		--item '{"pk":{"S":"test1"},"data":{"S":"hello world"}}'
	@echo "\n$(YELLOW)Getting item...$(NC)"
	@aws dynamodb get-item \
		--endpoint-url http://localhost:$(PORT) \
		--table-name TestTable \
		--key '{"pk":{"S":"test1"}}'
	@echo "\n$(YELLOW)Scanning table...$(NC)"
	@aws dynamodb scan \
		--endpoint-url http://localhost:$(PORT) \
		--table-name TestTable
	@echo "\n$(GREEN)✓ AWS CLI test complete$(NC)"

# Python SDK integration tests (requires JormunDB running on localhost)
sdk_test:
	@echo "$(BLUE)Running Python SDK tests against localhost:$(PORT)...$(NC)"
	@docker compose -f $(SDK_TEST_COMPOSE) down --remove-orphans 2>/dev/null || true
	@JORMUN_PORT=$(PORT) docker compose -f $(SDK_TEST_COMPOSE) run --rm --build sdk-test
	@docker compose -f $(SDK_TEST_COMPOSE) down --remove-orphans 2>/dev/null || true

# Development workflow
dev: clean build run

@@ -207,8 +171,6 @@ help:
	@echo ""
	@echo "$(GREEN)Test Commands:$(NC)"
	@echo "  make test      - Run unit tests"
	@echo "  make aws-test  - Test with AWS CLI commands"
	@echo "  make sdk_test  - Run Python SDK integration tests (requires running server)"
	@echo ""
	@echo "$(GREEN)Utility Commands:$(NC)"
	@echo "  make fmt       - Format source code"

@@ -404,7 +404,6 @@ brew upgrade odin # macOS

## Next Steps

- Read [ARCHITECTURE.md](ARCHITECTURE.md) for internals
- Check [TODO.md](TODO.md) for implementation status
- Browse source code in `dynamodb/`, `rocksdb/`, etc.
README.md (42 lines changed)

@@ -189,16 +189,38 @@ make run PORT=9000 DATA_DIR=/tmp/db VERBOSE=1

## Performance

From benchmarks on the original Zig version (Odin expected to be similar or better):
Benchmarked on a single node (localhost), 1000 iterations per test.

```
Sequential Writes   | 10000 ops | 245.32 ms | 40765 ops/sec
Random Reads        | 10000 ops | 312.45 ms | 32006 ops/sec
Batch Writes        | 10000 ops |  89.23 ms | 112071 ops/sec
PutItem             |  5000 ops | 892.34 ms | 5604 ops/sec
GetItem             |  5000 ops | 678.91 ms | 7365 ops/sec
Scan (full table)   |  5000 ops | 234.56 ms | 21320 ops/sec
```

### Basic Operations

| Operation | Throughput | Avg Latency | P95 Latency | P99 Latency |
|-----------|------------|-------------|-------------|-------------|
| **PutItem** | 1,021 ops/sec | 0.98ms | 1.02ms | 1.64ms |
| **GetItem** | 1,207 ops/sec | 0.83ms | 0.90ms | 1.14ms |
| **Query** | 1,002 ops/sec | 1.00ms | 1.11ms | 1.85ms |
| **Scan** (100 items) | 18,804 ops/sec | 0.05ms | - | - |
| **DeleteItem** | 1,254 ops/sec | 0.80ms | - | - |

### Batch Operations

| Operation | Throughput | Batch Size |
|-----------|------------|------------|
| **BatchWriteItem** | 9,297 ops/sec | 25 items |
| **BatchGetItem** | 9,113 ops/sec | 25 items |

### Concurrent Operations

| Workers | Throughput | Avg Latency | P95 Latency | P99 Latency |
|---------|------------|-------------|-------------|-------------|
| **10 concurrent** | 1,286 ops/sec | 7.70ms | 15.16ms | 19.72ms |

### Large Payloads

| Payload Size | Throughput | Avg Latency |
|--------------|------------|-------------|
| **10KB** | 522 ops/sec | 1.91ms |
| **50KB** | 166 ops/sec | 6.01ms |
| **100KB** | 96 ops/sec | 10.33ms |

## API Compatibility

@@ -218,11 +240,11 @@ Scan (full table) | 5000 ops | 234.56 ms | 21320 ops/sec
- ✅ ProjectionExpression
- ✅ BatchWriteItem
- ✅ BatchGetItem
- ✅ Global Secondary Indexes

### Coming Soon

- ⏳ UpdateItem (works, but needs UPDATED_NEW/UPDATED_OLD response filtering for full DynamoDB parity)
- ⏳ Global Secondary Indexes
- ⏳ Local Secondary Indexes

## Configuration
TODO.md (2 lines changed)

@@ -8,7 +8,7 @@ Goal: "aws cli works reliably for CreateTable/ListTables/PutItem/GetItem/DeleteI

### 1) HTTP + routing hardening
- [ ] Audit request parsing boundaries:
  - Max body size enforcement (config exists, need to verify enforcement path)
  - Max body size enforcement — **DONE**
  - Missing/invalid headers → correct DynamoDB error types
  - Content-Type handling (be permissive but consistent)
- [x] Ensure **all request-scoped allocations** come from the request arena (no accidental long-lived allocs)
@@ -1,14 +0,0 @@
services:
  sdk-test:
    image: python:3.12-slim
    network_mode: host
    working_dir: /tests
    volumes:
      - ./tests/sdk:/tests
    environment:
      - JORMUN_ENDPOINT=http://localhost:${JORMUN_PORT:-8002}
      - AWS_ACCESS_KEY_ID=local
      - AWS_SECRET_ACCESS_KEY=local
      - AWS_DEFAULT_REGION=us-east-1
    command: >
      sh -c "pip install --quiet boto3 && python test_sdk.py"
@@ -65,6 +65,17 @@ batch_write_item :: proc(
        unprocessed = make([dynamic]Batch_Write_Table_Request),
    }

    // Count total operations across all tables
    total_ops := 0
    for table_req in table_requests {
        total_ops += len(table_req.requests)
    }

    // Enforce DynamoDB limit: 25 operations per batch
    if total_ops > 25 {
        return result, .Validation_Error
    }

    for table_req in table_requests {
        failed_requests := make([dynamic]Write_Request)

@@ -78,13 +89,38 @@ batch_write_item :: proc(
                var_err = delete_item(engine, table_req.table_name, req.item)
            }

            // Distinguish validation errors from transient failures
            if var_err != .None {
                // Deep copy the failed request for UnprocessedItems
                failed_item := item_deep_copy(req.item)
                append(&failed_requests, Write_Request{
                    type = req.type,
                    item = failed_item,
                })
                #partial switch var_err {
                case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error:
                    // Hard validation errors — fail the entire batch
                    batch_write_result_destroy(&result)
                    delete(failed_requests)
                    return result, var_err

                case .Table_Not_Found:
                    // Non-existent table is a hard request failure, not a retryable condition.
                    // DynamoDB returns ResourceNotFoundException for the whole request.
                    batch_write_result_destroy(&result)
                    delete(failed_requests)
                    return result, .Table_Not_Found

                case .RocksDB_Error, .Item_Not_Found:
                    // Genuinely transient/infrastructure errors — add to UnprocessedItems.
                    failed_item := item_deep_copy(req.item)
                    append(&failed_requests, Write_Request{
                        type = req.type,
                        item = failed_item,
                    })

                case .None, .Validation_Error, .Internal_Error:
                    // Should not happen, but handle gracefully
                    failed_item := item_deep_copy(req.item)
                    append(&failed_requests, Write_Request{
                        type = req.type,
                        item = failed_item,
                    })
                }
            }
        }

@@ -101,6 +137,7 @@ batch_write_item :: proc(
    return result, .None
}


// ============================================================================
// BatchGetItem Types
// ============================================================================

@@ -157,6 +194,17 @@ batch_get_item :: proc(
        unprocessed_keys = make([dynamic]Batch_Get_Table_Request),
    }

    // Count total keys across all tables
    total_keys := 0
    for table_req in table_requests {
        total_keys += len(table_req.keys)
    }

    // Enforce DynamoDB limit: 100 keys per batch
    if total_keys > 100 {
        return result, .Validation_Error
    }

    for table_req in table_requests {
        found_items := make([dynamic]Item)
        failed_keys := make([dynamic]Item)

@@ -164,10 +212,25 @@ batch_get_item :: proc(
        for key in table_req.keys {
            item_result, get_err := get_item(engine, table_req.table_name, key)

            // Distinguish validation errors from transient failures
            if get_err != .None && get_err != .Item_Not_Found {
                // Storage error — add to unprocessed
                append(&failed_keys, item_deep_copy(key))
                continue
            #partial switch get_err {
            case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error:
                // Hard validation error — fail the entire batch
                batch_get_result_destroy(&result)
                delete(found_items)
                delete(failed_keys)
                return result, get_err

            case .RocksDB_Error, .Table_Not_Found:
                // Transient error — add to unprocessed
                append(&failed_keys, item_deep_copy(key))
                continue

            case .None, .Validation_Error, .Internal_Error, .Item_Not_Found:
                // Should not happen here, but handle gracefully
                continue
            }
            }

            if item_val, has_item := item_result.?; has_item {
@@ -16,6 +16,7 @@
package dynamodb

import "core:encoding/json"
import "core:strings"

// ============================================================================
// Condition Evaluation Result

@@ -54,7 +55,7 @@ parse_condition_expression_string :: proc(request_body: []byte) -> (expr: string
        return
    }

    expr = string(ce_str)
    expr = strings.clone(string(ce_str))
    ok = true
    return
}

@@ -88,6 +89,7 @@ evaluate_condition_expression :: proc(
    if !has_condition {
        return .Passed // No condition → always pass
    }
    defer delete(condition_str)

    // Parse the condition into a filter tree (same grammar as FilterExpression)
    filter_node, parse_ok := parse_filter_expression(condition_str, attr_names, attr_values)

@@ -60,8 +60,9 @@ key_condition_get_pk_bytes :: proc(kc: ^Key_Condition) -> ([]byte, bool) {
    #partial switch v in kc.pk_value {
    case String:
        return transmute([]byte)string(v), true
    case Number:
        return transmute([]byte)string(v), true
    case DDB_Number:
        // Use canonical encoding for numbers in keys!
        return encode_ddb_number_for_sort(v), true
    case Binary:
        return transmute([]byte)string(v), true
    }

@@ -110,7 +111,7 @@ tokenizer_next :: proc(t: ^Tokenizer) -> Maybe(string) {
    }

    // Single-character operators
    if c == '=' || c == '<' || c == '>' {
    if c == '=' || c == '<' || c == '>' || c == '+' || c == '-' {
        t.pos += 1
        return t.input[start:t.pos]
    }

@@ -137,9 +138,9 @@ is_whitespace :: proc(c: byte) -> bool {
@(private = "file")
is_ident_char :: proc(c: byte) -> bool {
    return (c >= 'a' && c <= 'z') ||
        (c >= 'A' && c <= 'Z') ||
        (c >= '0' && c <= '9') ||
        c == '_' || c == ':' || c == '#' || c == '-' || c == '.'
        (c >= 'A' && c <= 'Z') ||
        (c >= '0' && c <= '9') ||
        c == '_' || c == ':' || c == '#' || c == '.'
}

// ---------------------------------------------------------------------------

@@ -173,12 +174,14 @@ parse_key_condition_expression :: proc(

    eq_token := next_token(&t) or_return
    if eq_token != "=" {
        delete(pk_name) // free on error
        return
    }

    pk_value_token := next_token(&t) or_return
    pk_value, pk_ok := resolve_attribute_value(pk_value_token, attribute_values)
    if !pk_ok {
        delete(pk_name) // free on error
        return
    }

@@ -187,18 +190,30 @@ parse_key_condition_expression :: proc(
    // Optional "AND ..."
    if and_token, has_and := tokenizer_next(&t).?; has_and {
        if !strings.equal_fold(and_token, "AND") {
            delete(pk_name) // free on error
            attr_value_destroy(&pk_value)
            return
        }

        skc, skc_ok := parse_sort_key_condition(&t, attribute_names, attribute_values)
        if !skc_ok {
            delete(pk_name) // free on error
            attr_value_destroy(&pk_value)
            return
        }
        sk_condition = skc
    }

    // Verify all tokens were consumed (no trailing garbage)
    if trailing := tokenizer_next(&t); trailing != nil {
        delete(pk_name)
        attr_value_destroy(&pk_value)
        if skc, has_skc := sk_condition.?; has_skc {
            skc_copy := skc
            sort_key_condition_destroy(&skc_copy)
        }
        return
    }

    kc = Key_Condition{
        pk_name = pk_name,

@@ -228,12 +243,14 @@ parse_sort_key_condition :: proc(
    op_token := next_token(t) or_return
    operator, op_ok := parse_operator(op_token)
    if !op_ok {
        delete(sk_name) // free on error
        return
    }

    value_token := next_token(t) or_return
    value, val_ok := resolve_attribute_value(value_token, attribute_values)
    if !val_ok {
        delete(sk_name) // free on error
        return
    }

@@ -242,18 +259,21 @@ parse_sort_key_condition :: proc(
    // IMPORTANT: after allocating `value`, do NOT use `or_return` without cleanup.
    and_token, tok_ok := next_token(t)
    if !tok_ok || !strings.equal_fold(and_token, "AND") {
        delete(sk_name) // free on error
        attr_value_destroy(&value)
        return
    }

    value2_token, tok2_ok := next_token(t)
    if !tok2_ok {
        delete(sk_name) // free on error
        attr_value_destroy(&value)
        return
    }

    v2, v2_ok := resolve_attribute_value(value2_token, attribute_values)
    if !v2_ok {
        delete(sk_name) // free on error
        attr_value_destroy(&value)
        return
    }

@@ -287,18 +307,21 @@ parse_begins_with :: proc(

    comma := next_token(t) or_return
    if comma != "," {
        delete(sk_name) // free on error
        return
    }

    value_token := next_token(t) or_return
    value, val_ok := resolve_attribute_value(value_token, attribute_values)
    if !val_ok {
        delete(sk_name) // free on error
        return
    }

    // after allocating `value`, avoid `or_return` so we can clean up
    rparen, tok_ok := next_token(t)
    if !tok_ok || rparen != ")" {
        delete(sk_name) // free on error
        attr_value_destroy(&value)
        return
    }

@@ -393,13 +416,13 @@ parse_expression_attribute_names :: proc(request_body: []byte) -> Maybe(map[stri
parse_expression_attribute_values :: proc(request_body: []byte) -> (map[string]Attribute_Value, bool) {
    data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
    if parse_err != nil {
        return make(map[string]Attribute_Value), true
        return make(map[string]Attribute_Value), false
    }
    defer json.destroy_value(data)

    root, ok := data.(json.Object)
    if !ok {
        return make(map[string]Attribute_Value), true
        return make(map[string]Attribute_Value), false
    }

    values_val, found := root["ExpressionAttributeValues"]

@@ -409,7 +432,7 @@ parse_expression_attribute_values :: proc(request_body: []byte) -> (map[string]A

    values_obj, values_ok := values_val.(json.Object)
    if !values_ok {
        return make(map[string]Attribute_Value), true
        return make(map[string]Attribute_Value), false
    }

    result := make(map[string]Attribute_Value)

@@ -417,7 +440,13 @@ parse_expression_attribute_values :: proc(request_body: []byte) -> (map[string]A
    for key, val in values_obj {
        attr, attr_ok := parse_attribute_value(val)
        if !attr_ok {
            continue
            // Clean up already-parsed values before returning error
            for k, &v in result {
                attr_value_destroy(&v)
                delete(k)
            }
            delete(result)
            return make(map[string]Attribute_Value), false
        }
        result[strings.clone(key)] = attr
    }

@@ -451,7 +480,7 @@ parse_key_condition_expression_string :: proc(request_body: []byte) -> (expr: st
        return
    }

    expr = string(kce_str)
    expr = strings.clone(string(kce_str))
    ok = true
    return
}

@@ -459,6 +488,7 @@ parse_key_condition_expression_string :: proc(request_body: []byte) -> (expr: st
// Convenience: parse a complete Query key condition from request body
parse_query_key_condition :: proc(request_body: []byte) -> (kc: Key_Condition, ok: bool) {
    expression := parse_key_condition_expression_string(request_body) or_return
    defer delete(expression)

    attr_names := parse_expression_attribute_names(request_body)
    defer {

@@ -4,8 +4,8 @@
package dynamodb

import "core:encoding/json"
import "core:strconv"
import "core:strings"
import "core:mem"

// ============================================================================
// ProjectionExpression

@@ -51,6 +51,10 @@ parse_projection_expression :: proc(

    resolved, res_ok := resolve_attribute_name(trimmed, attribute_names)
    if !res_ok {
        // Cleanup previously cloned strings
        for path in result {
            delete(path)
        }
        delete(result)
        return nil, false
    }

@@ -139,6 +143,7 @@ Filter_Node :: struct {
    right: ^Filter_Node,
    // For Not
    child: ^Filter_Node,
    allocator: mem.Allocator, // allocator that created this node
}

filter_node_destroy :: proc(node: ^Filter_Node) {

@@ -167,6 +172,9 @@ filter_node_destroy :: proc(node: ^Filter_Node) {
    if node.child != nil {
        filter_node_destroy(node.child)
    }

    // Free the node itself using the allocator that created it
    free(node, node.allocator)
}

// ============================================================================

@@ -180,7 +188,17 @@ parse_filter_expression :: proc(
) -> (node: ^Filter_Node, ok: bool) {
    t := tokenizer_init(expression)
    node, ok = parse_or_expr(&t, attribute_names, attribute_values)
    return
    if !ok {
        return nil, false
    }

    // Verify all tokens were consumed (no trailing garbage)
    if trailing := tokenizer_next(&t); trailing != nil {
        filter_node_destroy(node)
        return nil, false
    }

    return node, true
}

parse_or_expr :: proc(

@@ -208,7 +226,7 @@ parse_or_expr :: proc(
        return nil, false
    }

    parent := new(Filter_Node)
    parent := make_filter_node()
    parent.type = .Or
    parent.left = left
    parent.right = right

@@ -247,7 +265,7 @@ parse_and_expr :: proc(
        return nil, false
    }

    parent := new(Filter_Node)
    parent := make_filter_node()
    parent.type = .And
    parent.left = left
    parent.right = right

@@ -278,7 +296,7 @@ parse_not_expr :: proc(
    if !child_ok {
        return nil, false
    }
    node := new(Filter_Node)
    node := make_filter_node()
    node.type = .Not
    node.child = child
    return node, true

@@ -374,7 +392,7 @@ parse_primary_expr :: proc(
        return nil, false
    }

    node := new(Filter_Node)
    node := make_filter_node()
    node.type = .Comparison
    node.path = path
    node.comp_op = comp_op

@@ -421,7 +439,7 @@ parse_filter_begins_with :: proc(
        return nil, false
    }

    node := new(Filter_Node)
    node := make_filter_node()
    node.type = .Begins_With
    node.path = path
    node.value = val

@@ -467,7 +485,7 @@ parse_filter_contains :: proc(
        return nil, false
    }

    node := new(Filter_Node)
    node := make_filter_node()
    node.type = .Contains
    node.path = path
    node.value = val

@@ -498,7 +516,7 @@ parse_filter_attr_exists :: proc(
        return nil, false
    }

    node := new(Filter_Node)
    node := make_filter_node()
    node.type = .Attribute_Exists if exists else .Attribute_Not_Exists
    node.path = path
    return node, true

@@ -536,7 +554,7 @@ parse_filter_between :: proc(
        return nil, false
    }

    node := new(Filter_Node)
    node := make_filter_node()
    node.type = .Between
    node.path = path
    node.value = lo_val

@@ -597,7 +615,7 @@ parse_filter_in :: proc(
        }
    }

    node := new(Filter_Node)
    node := make_filter_node()
    node.type = .In
    node.path = path
    node.in_values = in_vals[:]

@@ -688,6 +706,12 @@ evaluate_filter :: proc(item: Item, node: ^Filter_Node) -> bool {
evaluate_comparison :: proc(attr: Attribute_Value, op: Comparison_Op, val: Attribute_Value) -> bool {
    cmp := compare_attribute_values(attr, val)

    // -2 means types are incomparable - all comparisons return false
    // (matches DynamoDB behavior: mixed-type comparisons always fail)
    if cmp == -2 {
        return false
    }

    switch op {
    case .EQ: return cmp == 0
    case .NE: return cmp != 0

@@ -716,10 +740,10 @@ evaluate_contains :: proc(attr: Attribute_Value, val: Attribute_Value) -> bool {
            }
        }

    case Number_Set:
        if v, ok := val.(Number); ok {
            for n in a {
                if n == string(v) {
    case DDB_Number_Set:
        if v, ok := val.(DDB_Number); ok {
            for num in a {
                if compare_ddb_numbers(num, v) == 0 {
                    return true
                }
            }

@@ -755,21 +779,13 @@ compare_attribute_values :: proc(a: Attribute_Value, b: Attribute_Value) -> int
        return -2
    }

    // For Numbers, do numeric comparison
    _, a_is_num := a.(Number)
    _, b_is_num := b.(Number)
    // For Numbers, use DDB_Number comparison
    _, a_is_num := a.(DDB_Number)
    _, b_is_num := b.(DDB_Number)
    if a_is_num && b_is_num {
        a_val, a_parse := strconv.parse_f64(a_str)
        b_val, b_parse := strconv.parse_f64(b_str)
        if a_parse && b_parse {
            if a_val < b_val {
                return -1
            }
            if a_val > b_val {
                return 1
            }
            return 0
        }
        a_num := a.(DDB_Number)
        b_num := b.(DDB_Number)
        return compare_ddb_numbers(a_num, b_num)
    }

    return strings.compare(a_str, b_str)

@@ -801,7 +817,17 @@ parse_filter_expression_string :: proc(request_body: []byte) -> (expr: string, o
        return
    }

    expr = string(fe_str)
    expr = strings.clone(string(fe_str))
    ok = true
    return
}

// ============================================================================
// Allocator Helper
// ============================================================================

make_filter_node :: proc() -> ^Filter_Node {
    node := new(Filter_Node)
    node.allocator = context.allocator
    return node
}
@@ -20,6 +20,9 @@
// delete → for each GSI, extract GSI key attrs from the OLD item, delete GSI entry
// update → delete OLD GSI entries, write NEW GSI entries
//
// ATOMICITY: All GSI operations use WriteBatch to ensure that GSI entries are
// maintained atomically with the base item write/delete.
//
package dynamodb

import "core:slice"

@@ -39,25 +42,22 @@ GSI_Key_Values :: struct {
}

// Extract GSI key values from an item based on the GSI's key schema.
// Returns ok=false if the required partition key attribute is missing (sparse index).
// Returns ok=false if ANY required key attribute is missing (sparse index).
// DynamoDB sparse index semantics: item must have ALL key attributes defined in the GSI schema.
gsi_extract_key_values :: proc(item: Item, gsi_key_schema: []Key_Schema_Element) -> (GSI_Key_Values, bool) {
    result: GSI_Key_Values

    for ks in gsi_key_schema {
        attr, found := item[ks.attribute_name]
        if !found {
            if ks.key_type == .HASH {
                return {}, false // PK missing → sparse, skip this GSI entry
            }
            continue // SK missing is OK, just no SK segment
            // Any key attribute missing → sparse index, skip this item
            return {}, false
        }

        raw, raw_ok := attr_value_to_bytes(attr)
        if !raw_ok {
            if ks.key_type == .HASH {
                return {}, false
            }
            continue
            // Can't convert attribute to bytes → skip this item
            return {}, false
        }

        switch ks.key_type {

@@ -76,8 +76,8 @@ attr_value_to_bytes :: proc(attr: Attribute_Value) -> ([]byte, bool) {
    #partial switch v in attr {
    case String:
        return transmute([]byte)string(v), true
    case Number:
        return transmute([]byte)string(v), true
    case DDB_Number:
        return encode_ddb_number_for_sort(v), true
    case Binary:
        return transmute([]byte)string(v), true
    }

@@ -156,16 +156,16 @@ gsi_project_item :: proc(
}

// ============================================================================
// GSI Write Maintenance
// GSI Write Maintenance - ATOMIC via WriteBatch
//
// Called after a successful data write to maintain GSI entries.
// Uses WriteBatch for atomicity (all GSI entries for one item in one batch).
// These procedures add GSI operations to a WriteBatch instead of performing
// direct database writes. This ensures atomicity with the base item operation.
// ============================================================================

// Write GSI entries for an item across all GSIs defined on the table.
// Should be called AFTER the main data key is written.
gsi_write_entries :: proc(
    engine: ^Storage_Engine,
// Add GSI write operations to a WriteBatch for an item across all GSIs.
// Called during put_item or update_item to maintain NEW GSI entries.
gsi_batch_write_entries :: proc(
    batch: ^rocksdb.WriteBatch,
    table_name: string,
    item: Item,
    metadata: ^Table_Metadata,

@@ -175,15 +175,31 @@ gsi_write_entries :: proc(
        return .None
    }

    base_key, base_ok := key_from_item(item, metadata.key_schema)
    if !base_ok {
        return .Missing_Key_Attribute
    }
    defer key_destroy(&base_key)

    base_vals, base_vals_ok := key_get_values(&base_key)
    if !base_vals_ok {
        return .Invalid_Key
    }

    for &gsi in gsis {
        // Extract GSI key from item
        gsi_kv, kv_ok := gsi_extract_key_values(item, gsi.key_schema)
        if !kv_ok {
            continue // Sparse: item doesn't have GSI PK, skip
        }
        if !kv_ok do continue // item doesn't have GSI PK, skip

        // Build GSI storage key
        gsi_storage_key := build_gsi_key(table_name, gsi.index_name, gsi_kv.pk, gsi_kv.sk)
        gsi_storage_key := build_gsi_key(
            table_name,
            gsi.index_name,
            gsi_kv.pk,
            gsi_kv.sk,
            base_vals.pk,
            base_vals.sk,
        )
        defer delete(gsi_storage_key)

        // Build projected item

@@ -197,21 +213,18 @@ gsi_write_entries :: proc(
        }
        defer delete(encoded)

        // Write to RocksDB
        put_err := rocksdb.db_put(&engine.db, gsi_storage_key, encoded)
        if put_err != .None {
            return .RocksDB_Error
        }
        // Add to batch (not written yet)
        rocksdb.batch_put(batch, gsi_storage_key, encoded)
    }

    return .None
}

// Delete GSI entries for an item across all GSIs.
// Should be called BEFORE or AFTER the main data key is deleted.
// Add GSI delete operations to a WriteBatch for an item across all GSIs.
// Called during delete_item or update_item to remove OLD GSI entries.
// Needs the OLD item to know which GSI keys to remove.
gsi_delete_entries :: proc(
    engine: ^Storage_Engine,
gsi_batch_delete_entries :: proc(
    batch: ^rocksdb.WriteBatch,
    table_name: string,
    old_item: Item,
    metadata: ^Table_Metadata,

@@ -221,19 +234,35 @@ gsi_delete_entries :: proc(
        return .None
    }

    for &gsi in gsis {
        gsi_kv, kv_ok := gsi_extract_key_values(old_item, gsi.key_schema)
        if !kv_ok {
            continue // Item didn't have a GSI entry
        }
    base_key, base_ok := key_from_item(old_item, metadata.key_schema)
    if !base_ok {
        return .Missing_Key_Attribute
    }
    defer key_destroy(&base_key)

        gsi_storage_key := build_gsi_key(table_name, gsi.index_name, gsi_kv.pk, gsi_kv.sk)
    base_vals, base_vals_ok := key_get_values(&base_key)
    if !base_vals_ok {
        return .Invalid_Key
    }

    for &gsi in gsis {
        // Extract GSI key from item
        gsi_kv, kv_ok := gsi_extract_key_values(old_item, gsi.key_schema)
        if !kv_ok do continue // old item doesn't have GSI PK, skip

        // Build GSI storage key
        gsi_storage_key := build_gsi_key(
            table_name,
            gsi.index_name,
            gsi_kv.pk,
            gsi_kv.sk,
            base_vals.pk,
            base_vals.sk,
        )
        defer delete(gsi_storage_key)

        del_err := rocksdb.db_delete(&engine.db, gsi_storage_key)
        if del_err != .None {
            return .RocksDB_Error
        }
        // Add to batch (not written yet)
        rocksdb.batch_delete(batch, gsi_storage_key)
    }

    return .None
@@ -76,10 +76,12 @@ encode_attribute_value :: proc(buf: ^bytes.Buffer, attr: Attribute_Value) -> boo
|
||||
encode_varint(buf, len(v))
|
||||
bytes.buffer_write_string(buf, string(v))
|
||||
|
||||
case Number:
|
||||
case DDB_Number:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Number))
|
||||
encode_varint(buf, len(v))
|
||||
bytes.buffer_write_string(buf, string(v))
|
||||
// Store as string in item encoding
|
||||
num_str := format_ddb_number(v)
|
||||
encode_varint(buf, len(num_str))
|
||||
bytes.buffer_write_string(buf, num_str)
|
||||
|
||||
case Binary:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Binary))
|
||||
@@ -94,6 +96,16 @@ encode_attribute_value :: proc(buf: ^bytes.Buffer, attr: Attribute_Value) -> boo
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Null))
|
||||
// NULL has no value bytes
|
||||
|
||||
case DDB_Number_Set:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Number_Set)) // Use Number_Set tag, not DDB_Number_Set
|
||||
encode_varint(buf, len(v))
|
||||
for num in v {
|
||||
// Format the DDB_Number to a string
|
||||
num_str := format_ddb_number(num)
|
||||
encode_varint(buf, len(num_str))
|
||||
bytes.buffer_write_string(buf, num_str)
|
||||
}
|
||||
|
||||
case String_Set:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.String_Set))
|
||||
encode_varint(buf, len(v))
|
||||
@@ -102,14 +114,6 @@ encode_attribute_value :: proc(buf: ^bytes.Buffer, attr: Attribute_Value) -> boo
|
||||
bytes.buffer_write_string(buf, s)
|
||||
}
|
||||
|
||||
case Number_Set:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Number_Set))
|
||||
encode_varint(buf, len(v))
|
||||
for n in v {
|
||||
encode_varint(buf, len(n))
|
||||
bytes.buffer_write_string(buf, n)
|
||||
}
|
||||
|
||||
case Binary_Set:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Binary_Set))
|
||||
encode_varint(buf, len(v))
|
||||
@@ -289,9 +293,15 @@ decode_attribute_value :: proc(decoder: ^Binary_Decoder) -> (Attribute_Value, bo
|
||||
return nil, false
|
||||
}
|
||||
|
||||
str := string(data)
|
||||
owned := transmute(string)slice.clone(transmute([]byte)str)
|
||||
return Number(owned), true
|
||||
num_str := string(data)
|
||||
|
||||
// Parse into DDB_Number
|
||||
ddb_num, num_ok := parse_ddb_number(num_str)
|
||||
if !num_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return ddb_num, true
|
||||
|
||||
case .Binary:
|
||||
length, len_ok := decoder_read_varint(decoder)
|
||||
@@ -359,32 +369,35 @@ decode_attribute_value :: proc(decoder: ^Binary_Decoder) -> (Attribute_Value, bo
|
||||
return nil, false
|
||||
}
|
||||
|
||||
numbers := make([]string, count)
|
||||
numbers := make([]DDB_Number, count) // Changed to DDB_Number
|
||||
|
||||
for i in 0..<count {
|
||||
length, len_ok := decoder_read_varint(decoder)
|
||||
if !len_ok {
|
||||
for j in 0..<i {
|
||||
delete(numbers[j])
|
||||
}
|
||||
// No cleanup needed for DDB_Number (no heap allocations)
|
||||
delete(numbers)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
data, data_ok := decoder_read_bytes(decoder, length)
|
||||
if !data_ok {
|
||||
for j in 0..<i {
|
||||
delete(numbers[j])
|
||||
}
|
||||
delete(numbers)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
str := string(data)
|
||||
numbers[i] = transmute(string)slice.clone(transmute([]byte)str)
|
||||
num_str := string(data)
|
||||
|
||||
// Parse into DDB_Number
|
||||
ddb_num, num_ok := parse_ddb_number(num_str)
|
||||
if !num_ok {
|
||||
delete(numbers)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
numbers[i] = ddb_num
|
||||
}
|
||||
|
||||
return Number_Set(numbers), true
|
||||
return DDB_Number_Set(numbers), true
|
||||
|
||||
case .Binary_Set:
|
||||
count, count_ok := decoder_read_varint(decoder)
|
||||
|
||||
@@ -85,7 +85,16 @@ parse_attribute_value :: proc(value: json.Value) -> (Attribute_Value, bool) {
|
||||
if !str_ok {
|
||||
return nil, false
|
||||
}
|
||||
return Number(strings.clone(string(str))), true
|
||||
|
||||
// Parse into DDB_Number
|
||||
ddb_num, num_ok := parse_ddb_number(string(str))
|
||||
if !num_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Clone the string fields since they're slices of the input
|
||||
owned_num := clone_ddb_number(ddb_num)
|
||||
return owned_num, true
|
||||
}
|
||||
|
||||
// Binary (base64 string)
|
||||
@@ -147,22 +156,38 @@ parse_attribute_value :: proc(value: json.Value) -> (Attribute_Value, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
numbers_arr := make([]string, len(arr))
|
||||
numbers_arr := make([]DDB_Number, len(arr))
|
||||
|
||||
for item, i in arr {
|
||||
str, str_ok := item.(json.String)
|
||||
if !str_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
delete(numbers_arr[j])
|
||||
// Clean up DDB_Numbers
|
||||
delete(numbers_arr[j].integer_part)
|
||||
delete(numbers_arr[j].fractional_part)
|
||||
}
|
||||
delete(numbers_arr)
|
||||
return nil, false
|
||||
}
|
||||
numbers_arr[i] = strings.clone(string(str))
|
||||
|
||||
// Parse into DDB_Number
|
||||
ddb_num, num_ok := parse_ddb_number(string(str))
|
||||
if !num_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
delete(numbers_arr[j].integer_part)
|
||||
delete(numbers_arr[j].fractional_part)
|
||||
}
|
||||
delete(numbers_arr)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Clone and store
|
||||
numbers_arr[i] = clone_ddb_number(ddb_num)
|
||||
}
|
||||
|
||||
return Number_Set(numbers_arr), true
|
||||
return DDB_Number_Set(numbers_arr), true
|
||||
}
|
||||
|
||||
// Binary Set
|
||||
@@ -297,16 +322,25 @@ serialize_item_to_builder :: proc(b: ^strings.Builder, item: Item) {
|
||||
serialize_attribute_value :: proc(b: ^strings.Builder, attr: Attribute_Value) {
|
||||
switch v in attr {
|
||||
case String:
|
||||
fmt.sbprintf(b, `{"S":"%s"}`, string(v))
|
||||
strings.write_string(b, `{"S":"`)
|
||||
strings.write_string(b, string(v))
|
||||
strings.write_string(b, `"}`)
|
||||
|
||||
case Number:
|
||||
fmt.sbprintf(b, `{"N":"%s"}`, string(v))
|
||||
case DDB_Number:
|
||||
num_str := format_ddb_number(v)
|
||||
strings.write_string(b, `{"N":"`)
|
||||
strings.write_string(b, num_str)
|
||||
strings.write_string(b, `"}`)
|
||||
|
||||
case Binary:
|
||||
fmt.sbprintf(b, `{"B":"%s"}`, string(v))
|
||||
strings.write_string(b, `{"B":"`)
|
||||
strings.write_string(b, string(v))
|
||||
strings.write_string(b, `"}`)
|
||||
|
||||
case Bool:
|
||||
fmt.sbprintf(b, `{"BOOL":%v}`, bool(v))
|
||||
strings.write_string(b, `{"BOOL":`)
|
||||
if bool(v) { strings.write_string(b, "true") } else { strings.write_string(b, "false") }
|
||||
strings.write_string(b, "}")
|
||||
|
||||
case Null:
|
||||
strings.write_string(b, `{"NULL":true}`)
|
||||
@@ -321,13 +355,14 @@ serialize_attribute_value :: proc(b: ^strings.Builder, attr: Attribute_Value) {
|
||||
}
|
||||
strings.write_string(b, "]}")
|
||||
|
||||
case Number_Set:
|
||||
case DDB_Number_Set:
|
||||
strings.write_string(b, `{"NS":[`)
|
||||
for n, i in v {
|
||||
for num, i in v {
|
||||
if i > 0 {
|
||||
strings.write_string(b, ",")
|
||||
}
|
||||
fmt.sbprintf(b, `"%s"`, n)
|
||||
num_str := format_ddb_number(num)
|
||||
fmt.sbprintf(b, `"%s"`, num_str)
|
||||
}
|
||||
strings.write_string(b, "]}")
|
||||
|
||||
@@ -404,7 +439,7 @@ parse_table_name :: proc(request_body: []byte) -> (string, bool) {
|
||||
return "", false
|
||||
}
|
||||
|
||||
return string(table_name_str), true
|
||||
return strings.clone(string(table_name_str)), true
|
||||
}
|
||||
|
||||
// Parse Item field from request body
|
||||
@@ -494,45 +529,49 @@ parse_limit :: proc(request_body: []byte) -> int {
|
||||
// Returns nil (not an error) when the field is absent.
|
||||
// ============================================================================
|
||||
|
||||
// Returns (key, ok, body_parse_err).
|
||||
// ok=true, body_parse_err=false → key present and valid, or key absent (no pagination)
|
||||
// ok=false, body_parse_err=true → request body is not valid JSON or not an object
|
||||
// ok=false, body_parse_err=false → ExclusiveStartKey present but malformed/invalid
|
||||
parse_exclusive_start_key :: proc(
|
||||
request_body: []byte,
|
||||
table_name: string,
|
||||
key_schema: []Key_Schema_Element,
|
||||
) -> (result: Maybe([]byte), ok: bool) {
|
||||
) -> (result: Maybe([]byte), ok: bool, body_err: bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return nil, true // no ESK is fine
|
||||
return nil, false, true // body is not valid JSON — real error
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
return nil, true
|
||||
return nil, false, true // root must be an object — real error
|
||||
}
|
||||
|
||||
esk_val, found := root["ExclusiveStartKey"]
|
||||
if !found {
|
||||
return nil, true // absent → no pagination, that's ok
|
||||
return nil, true, false // absent → no pagination, that's ok
|
||||
}
|
||||
|
||||
// Parse ExclusiveStartKey as a DynamoDB Item
|
||||
key_item, item_ok := parse_item_from_value(esk_val)
|
||||
if !item_ok {
|
||||
return nil, false // present but malformed → real error
|
||||
return nil, false, false // present but malformed → validation error
|
||||
}
|
||||
defer item_destroy(&key_item)
|
||||
|
||||
// Validate and extract key struct using schema
|
||||
key_struct, key_ok := key_from_item(key_item, key_schema)
|
||||
if !key_ok {
|
||||
return nil, false // missing required key attributes
|
||||
return nil, false, false // missing required key attributes
|
||||
}
|
||||
defer key_destroy(&key_struct)
|
||||
|
||||
// Get raw byte values
|
||||
key_values, kv_ok := key_get_values(&key_struct)
|
||||
if !kv_ok {
|
||||
return nil, false
|
||||
return nil, false, false
|
||||
}
|
||||
|
||||
// Build binary storage key
|
||||
@@ -542,6 +581,53 @@ parse_exclusive_start_key :: proc(
|
||||
return
|
||||
}
|
||||
|
||||
// parse_exclusive_start_key_gsi ... Just a helper for GSI keys
|
||||
// Returns (key, ok, body_parse_err) — same contract as parse_exclusive_start_key.
|
||||
parse_exclusive_start_key_gsi :: proc(
|
||||
request_body: []byte,
|
||||
table_name: string,
|
||||
metadata: ^Table_Metadata,
|
||||
gsi: ^Global_Secondary_Index,
|
||||
) -> (Maybe([]byte), bool, bool) {
|
||||
root, parse_err := json.parse(request_body)
|
||||
if parse_err != nil do return nil, false, true // body not valid JSON
|
||||
defer json.destroy_value(root)
|
||||
|
||||
obj, obj_ok := root.(json.Object)
|
||||
if !obj_ok do return nil, false, true // root must be an object
|
||||
|
||||
esk_val, has := obj["ExclusiveStartKey"]
|
||||
if !has do return nil, true, false // absent → no pagination
|
||||
|
||||
key_item, key_ok := parse_item_from_value(esk_val)
|
||||
if !key_ok do return nil, false, false
|
||||
defer item_destroy(&key_item)
|
||||
|
||||
idx_key, idx_ok := key_from_item(key_item, gsi.key_schema)
|
||||
if !idx_ok do return nil, false, false
|
||||
defer key_destroy(&idx_key)
|
||||
|
||||
idx_vals, idx_vals_ok := key_get_values(&idx_key)
|
||||
if !idx_vals_ok do return nil, false, false
|
||||
|
||||
base_key, base_ok := key_from_item(key_item, metadata.key_schema)
|
||||
if !base_ok do return nil, false, false
|
||||
defer key_destroy(&base_key)
|
||||
|
||||
base_vals, base_vals_ok := key_get_values(&base_key)
|
||||
if !base_vals_ok do return nil, false, false
|
||||
|
||||
k := build_gsi_key(
|
||||
table_name,
|
||||
gsi.index_name,
|
||||
idx_vals.pk,
|
||||
idx_vals.sk,
|
||||
base_vals.pk,
|
||||
base_vals.sk,
|
||||
)
|
||||
return k, true, false
|
||||
}

// ============================================================================
// LastEvaluatedKey Generation (Pagination Output)
//
@@ -613,3 +699,122 @@ serialize_last_evaluated_key :: proc(

    return serialize_item(item), true
}

Decoded_GSI_Key_Full :: struct {
    gsi_pk:  []byte,
    gsi_sk:  Maybe([]byte),
    base_pk: []byte,
    base_sk: Maybe([]byte),
}

// Decode binary GSI key:
//
//   [gsi][table_name][index_name][gsi_pk][gsi_sk?][base_pk][base_sk?]
//
// Presence of gsi_sk/base_sk depends on whether the index/table has a RANGE key.
decode_gsi_key_full_borrowed :: proc(
    binary_key: []byte,
    gsi_has_sort_key: bool,
    table_has_sort_key: bool,
) -> (result: Decoded_GSI_Key_Full, ok: bool) {
    decoder := Key_Decoder{data = binary_key, pos = 0}

    et := decoder_read_entity_type(&decoder) or_return
    if et != .GSI {
        return {}, false
    }

    // Skip table name + index name
    _ = decoder_read_segment_borrowed(&decoder) or_return
    _ = decoder_read_segment_borrowed(&decoder) or_return

    // Read GSI PK
    result.gsi_pk = decoder_read_segment_borrowed(&decoder) or_return

    // Read GSI SK if index has one
    if gsi_has_sort_key {
        sk := decoder_read_segment_borrowed(&decoder) or_return
        result.gsi_sk = sk
    }

    // Read base PK
    result.base_pk = decoder_read_segment_borrowed(&decoder) or_return

    // Read base SK if table has one
    if table_has_sort_key {
        sk := decoder_read_segment_borrowed(&decoder) or_return
        result.base_sk = sk
    }

    return result, true
}

// Serialize a binary *GSI* key into a DynamoDB LastEvaluatedKey JSON object.
// The output must include the *index* key attrs + the *base table* primary key attrs,
// so boto can round-trip ExclusiveStartKey correctly.
serialize_last_evaluated_key_gsi :: proc(
    binary_key: []byte,
    metadata: ^Table_Metadata,
    gsi: ^Global_Secondary_Index,
) -> (result: string, ok: bool) {
    // Determine whether index/table have range keys
    _, gsi_has_sk := gsi_get_sort_key_name(gsi).?
    _, tbl_has_sk := table_metadata_get_sort_key_name(metadata).?

    decoded, dec_ok := decode_gsi_key_full_borrowed(binary_key, gsi_has_sk, tbl_has_sk)
    if !dec_ok {
        return "", false
    }

    // Resolve key attribute names + types
    idx_pk_name := gsi_get_partition_key_name(gsi).? or_return
    idx_pk_type := table_metadata_get_attribute_type(metadata, idx_pk_name).? or_return

    idx_sk_name: Maybe(string) = gsi_get_sort_key_name(gsi)
    idx_sk_type: Maybe(Scalar_Attribute_Type) = nil
    if n, has := idx_sk_name.?; has {
        idx_sk_type = table_metadata_get_attribute_type(metadata, n)
    }

    base_pk_name := table_metadata_get_partition_key_name(metadata).? or_return
    base_pk_type := table_metadata_get_attribute_type(metadata, base_pk_name).? or_return

    base_sk_name: Maybe(string) = table_metadata_get_sort_key_name(metadata)
    base_sk_type: Maybe(Scalar_Attribute_Type) = nil
    if n, has := base_sk_name.?; has {
        base_sk_type = table_metadata_get_attribute_type(metadata, n)
    }

    // Build LEK item
    lek := make(Item)
    defer item_destroy(&lek)

    add_attr_once :: proc(item: ^Item, name: string, raw: []byte, t: Scalar_Attribute_Type) {
        if _, exists := item^[name]; exists {
            return
        }
        item^[strings.clone(name)] = build_attribute_value_with_type(raw, t)
    }

    // Index keys
    add_attr_once(&lek, idx_pk_name, decoded.gsi_pk, idx_pk_type)

    if sk_raw, has := decoded.gsi_sk.?; has {
        skn := idx_sk_name.? or_return
        skt := idx_sk_type.? or_return
        add_attr_once(&lek, skn, sk_raw, skt)
    }

    // Base table keys
    add_attr_once(&lek, base_pk_name, decoded.base_pk, base_pk_type)

    if sk_raw, has := decoded.base_sk.?; has {
        skn := base_sk_name.? or_return
        skt := base_sk_type.? or_return
        add_attr_once(&lek, skn, sk_raw, skt)
    }

    return serialize_item(lek), true
}

@@ -130,32 +130,43 @@ build_partition_prefix :: proc(table_name: string, pk_value: []byte) -> []byte {
    return bytes.buffer_to_bytes(&buf)
}

-// Build GSI key: [gsi][table_name][index_name][gsi_pk][gsi_sk?]
-build_gsi_key :: proc(table_name: string, index_name: string, gsi_pk: []byte, gsi_sk: Maybe([]byte)) -> []byte {
+// Build GSI key: [gsi][table_name][index_name][gsi_pk][gsi_sk?][base_pk][base_sk?]
+build_gsi_key :: proc(
+   table_name: string,
+   index_name: string,
+   gsi_pk: []byte,
+   gsi_sk: Maybe([]byte),
+   base_pk: []byte,
+   base_sk: Maybe([]byte),
+) -> []byte {
    buf: bytes.Buffer
    bytes.buffer_init_allocator(&buf, 0, 512, context.allocator)

    // Write entity type
    bytes.buffer_write_byte(&buf, u8(Entity_Type.GSI))

    // Write table name
    encode_varint(&buf, len(table_name))
    bytes.buffer_write_string(&buf, table_name)

    // Write index name
    encode_varint(&buf, len(index_name))
    bytes.buffer_write_string(&buf, index_name)

    // Write GSI partition key
    encode_varint(&buf, len(gsi_pk))
    bytes.buffer_write(&buf, gsi_pk)

    // Write GSI sort key if present
    if sk, ok := gsi_sk.?; ok {
        encode_varint(&buf, len(sk))
        bytes.buffer_write(&buf, sk)
    }

+   // Tie-breaker: base table primary key, so entries stay unique when GSI key values collide
+   encode_varint(&buf, len(base_pk))
+   bytes.buffer_write(&buf, base_pk)
+
+   if sk, ok := base_sk.?; ok {
+       encode_varint(&buf, len(sk))
+       bytes.buffer_write(&buf, sk)
+   }
+
    return bytes.buffer_to_bytes(&buf)
}
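
A small round-trip sketch for the extended key layout, assuming it runs inside the same package. The table name, index name, and key values are arbitrary placeholders.

```odin
gsi_key_roundtrip_example :: proc() {
    pk := "cust#42"
    base := "order#7"

    k := build_gsi_key("Orders", "ByCustomer", transmute([]byte)pk, nil, transmute([]byte)base, nil)
    defer delete(k)

    // No RANGE key on either the index or the base table in this example.
    decoded, ok := decode_gsi_key_full_borrowed(k, false, false)
    assert(ok)
    assert(string(decoded.gsi_pk) == "cust#42")
    assert(string(decoded.base_pk) == "order#7")
}
```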

796 dynamodb/number.odin Normal file
@@ -0,0 +1,796 @@
package dynamodb

import "core:fmt"
import "core:strconv"
import "core:strings"
import "core:bytes"

// ============================================================================
// DynamoDB Number Type
//
// DynamoDB numbers are arbitrary-precision decimals with up to 38 digits of
// precision. They can be positive, negative, or zero.
//
// We store numbers internally as:
//   - sign: bool (true = positive/zero, false = negative)
//   - integer_part: string (digits only, no sign)
//   - fractional_part: string (digits only, if any)
//   - exponent: i32 (for scientific notation, if needed)
//
// This preserves the original precision and allows proper ordering.
// ============================================================================

DDB_Number :: struct {
    sign:            bool,   // true = positive/zero, false = negative
    integer_part:    string, // digits only (e.g., "123")
    fractional_part: string, // digits only (e.g., "456" for .456)
    exponent:        i32,    // scientific notation exponent (usually 0)
}

// Parse a number string into DDB_Number.
// Supports formats: "123", "-123", "123.456", "1.23e10", "-1.23e-5"
parse_ddb_number :: proc(s: string) -> (DDB_Number, bool) {
    if len(s) == 0 {
        return {}, false
    }

    num: DDB_Number
    str := s

    // Parse sign
    if str[0] == '-' {
        num.sign = false
        str = str[1:]
    } else if str[0] == '+' {
        num.sign = true
        str = str[1:]
    } else {
        num.sign = true
    }

    if len(str) == 0 {
        return {}, false
    }

    // Find exponent if present (e or E)
    exp_pos := -1
    for i in 0..<len(str) {
        if str[i] == 'e' || str[i] == 'E' {
            exp_pos = i
            break
        }
    }

    // Parse mantissa
    mantissa := str
    if exp_pos >= 0 {
        mantissa = str[:exp_pos]
        exp_str := str[exp_pos+1:]
        exp_val, exp_ok := strconv.parse_i64(exp_str)
        if !exp_ok {
            return {}, false
        }
        num.exponent = i32(exp_val)
    }

    // Find decimal point
    dot_pos := -1
    for i in 0..<len(mantissa) {
        if mantissa[i] == '.' {
            dot_pos = i
            break
        }
    }

    // Parse integer and fractional parts
    if dot_pos >= 0 {
        num.integer_part = mantissa[:dot_pos]
        num.fractional_part = mantissa[dot_pos+1:]

        // Validate fractional part
        for c in num.fractional_part {
            if c < '0' || c > '9' {
                return {}, false
            }
        }
    } else {
        num.integer_part = mantissa
    }

    // Validate integer part (at least one digit, all digits)
    if len(num.integer_part) == 0 {
        num.integer_part = "0"
    }
    for c in num.integer_part {
        if c < '0' || c > '9' {
            return {}, false
        }
    }

    // Normalize: remove leading zeros from integer part (except if it's just "0")
    num = normalize_ddb_number(num)

    // Check precision (DynamoDB supports up to 38 digits)
    total_digits := len(num.integer_part) + len(num.fractional_part)
    if total_digits > 38 {
        return {}, false
    }

    // Special case: if the number is zero
    if is_ddb_number_zero(num) {
        num.sign = true
        num.exponent = 0
    }

    return num, true
}
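
A usage sketch for `parse_ddb_number`, assuming it runs in the same package; it shows where the pieces of a signed scientific-notation literal land in the struct.

```odin
parse_example :: proc() {
    n, ok := parse_ddb_number("-12.50e3")
    assert(ok)
    assert(!n.sign)                  // negative
    assert(n.integer_part == "12")   // digits only, sign stripped
    assert(n.fractional_part == "5") // trailing zero trimmed by normalization
    assert(n.exponent == 3)          // exponent kept, applied lazily
}
```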

// Normalize a DDB_Number (remove leading zeros, trailing fractional zeros)
normalize_ddb_number :: proc(num: DDB_Number) -> DDB_Number {
    result := num

    // Remove leading zeros from integer part
    int_part := num.integer_part
    for len(int_part) > 1 && int_part[0] == '0' {
        int_part = int_part[1:]
    }
    result.integer_part = int_part

    // Remove trailing zeros from fractional part
    frac_part := num.fractional_part
    for len(frac_part) > 0 && frac_part[len(frac_part)-1] == '0' {
        frac_part = frac_part[:len(frac_part)-1]
    }
    result.fractional_part = frac_part

    return result
}

// Check if a DDB_Number represents zero
is_ddb_number_zero :: proc(num: DDB_Number) -> bool {
    // Check if integer part is all zeros
    for c in num.integer_part {
        if c != '0' {
            return false
        }
    }
    // Check if fractional part is all zeros
    for c in num.fractional_part {
        if c != '0' {
            return false
        }
    }
    return true
}

// Convert DDB_Number to string representation
ddb_number_to_string :: proc(num: DDB_Number) -> string {
    builder := strings.builder_make()

    if !num.sign {
        strings.write_string(&builder, "-")
    }

    strings.write_string(&builder, num.integer_part)

    if len(num.fractional_part) > 0 {
        strings.write_string(&builder, ".")
        strings.write_string(&builder, num.fractional_part)
    }

    if num.exponent != 0 {
        fmt.sbprintf(&builder, "e%d", num.exponent)
    }

    return strings.to_string(builder)
}

// Compare two DDB_Numbers.
// Returns: -1 if a < b, 0 if a == b, 1 if a > b
compare_ddb_numbers :: proc(a: DDB_Number, b: DDB_Number) -> int {
    // Handle zero cases
    a_zero := is_ddb_number_zero(a)
    b_zero := is_ddb_number_zero(b)

    if a_zero && b_zero {
        return 0
    }
    if a_zero {
        return b.sign ? -1 : 1 // 0 < positive, 0 > negative
    }
    if b_zero {
        return a.sign ? 1 : -1 // positive > 0, negative < 0
    }

    // Different signs
    if a.sign != b.sign {
        return a.sign ? 1 : -1 // positive > negative
    }

    // Same sign - compare magnitudes
    mag_cmp := compare_ddb_number_magnitudes(a, b)

    // If negative, reverse the comparison
    if !a.sign {
        return -mag_cmp
    }
    return mag_cmp
}
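
A short sketch of `compare_ddb_numbers` semantics: ordering is by numeric value, not by the textual form the operand arrived in.

```odin
compare_example :: proc() {
    a, _ := parse_ddb_number("0.5")
    b, _ := parse_ddb_number("5e-1")
    c, _ := parse_ddb_number("-0.25")

    assert(compare_ddb_numbers(a, b) == 0) // same value, different notation
    assert(compare_ddb_numbers(c, a) < 0)  // negative sorts below positive
}
```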

// Compare magnitudes (absolute values) of two DDB_Numbers
compare_ddb_number_magnitudes :: proc(a: DDB_Number, b: DDB_Number) -> int {
    // Adjust for exponents first
    a_adj := adjust_for_exponent(a)
    b_adj := adjust_for_exponent(b)

    // Compare integer parts length
    if len(a_adj.integer_part) != len(b_adj.integer_part) {
        return len(a_adj.integer_part) > len(b_adj.integer_part) ? 1 : -1
    }

    // Compare integer parts digit by digit
    for i in 0..<len(a_adj.integer_part) {
        if a_adj.integer_part[i] != b_adj.integer_part[i] {
            return a_adj.integer_part[i] > b_adj.integer_part[i] ? 1 : -1
        }
    }

    // Integer parts equal, compare fractional parts
    max_frac_len := max(len(a_adj.fractional_part), len(b_adj.fractional_part))
    for i in 0..<max_frac_len {
        a_digit := i < len(a_adj.fractional_part) ? a_adj.fractional_part[i] : '0'
        b_digit := i < len(b_adj.fractional_part) ? b_adj.fractional_part[i] : '0'

        if a_digit != b_digit {
            return a_digit > b_digit ? 1 : -1
        }
    }

    return 0
}

// Adjust a number for its exponent (conceptually multiply by 10^exponent)
adjust_for_exponent :: proc(num: DDB_Number) -> DDB_Number {
    if num.exponent == 0 {
        return num
    }

    result := num
    result.exponent = 0

    if num.exponent > 0 {
        // Shift decimal point right
        exp := int(num.exponent)
        frac := num.fractional_part

        // Move fractional digits to integer part
        shift := min(exp, len(frac))
        result.integer_part = strings.concatenate({num.integer_part, frac[:shift]})
        result.fractional_part = frac[shift:]

        // Add zeros if needed
        if exp > len(frac) {
            zeros := strings.repeat("0", exp - len(frac))
            result.integer_part = strings.concatenate({result.integer_part, zeros})
        }
    } else {
        // Shift decimal point left
        exp := -int(num.exponent)
        int_part := num.integer_part

        // Move integer digits to fractional part
        shift := min(exp, len(int_part))
        result.integer_part = int_part[:len(int_part)-shift]
        if len(result.integer_part) == 0 {
            result.integer_part = "0"
        }
        result.fractional_part = strings.concatenate({
            int_part[len(int_part)-shift:],
            num.fractional_part,
        })

        // Add leading zeros if needed
        if exp > len(int_part) {
            zeros := strings.repeat("0", exp - len(int_part))
            result.fractional_part = strings.concatenate({zeros, result.fractional_part})
        }
    }

    return normalize_ddb_number(result)
}

// ============================================================================
// Canonical Encoding for Sort Keys
//
// For numbers to sort correctly in byte-wise comparisons, we need a
// canonical encoding that preserves numeric ordering.
//
// Encoding format:
//   - 1 byte: sign/magnitude marker
//       - 0x00: negative infinity (reserved)
//       - 0x01-0x7F: negative numbers (inverted magnitude)
//       - 0x80: zero
//       - 0x81-0xFE: positive numbers (magnitude)
//       - 0xFF: positive infinity (reserved)
//   - N bytes: encoded magnitude (variable length)
//
// For positive numbers: we encode the magnitude directly, with a varint prefix
// giving the number of integer digits.
//
// For negative numbers: we encode the magnitude inverted (bitwise NOT) so
// that negatives with larger magnitude sort first.
// ============================================================================

// Encode a DDB_Number into canonical byte form for sort keys
encode_ddb_number_for_sort :: proc(num: DDB_Number) -> []byte {
    buf: bytes.Buffer
    bytes.buffer_init_allocator(&buf, 0, 64, context.allocator)

    if is_ddb_number_zero(num) {
        bytes.buffer_write_byte(&buf, 0x80)
        return bytes.buffer_to_bytes(&buf)
    }

    // Get normalized magnitude
    norm := normalize_ddb_number(num)
    adj := adjust_for_exponent(norm)

    // Encode magnitude bytes
    mag_bytes := encode_magnitude(adj)

    if num.sign {
        // Positive number: 0x81 marker followed by the magnitude
        bytes.buffer_write_byte(&buf, 0x81)
        bytes.buffer_write(&buf, mag_bytes)
    } else {
        // Negative number: 0x7F marker followed by the inverted magnitude
        bytes.buffer_write_byte(&buf, 0x7F)
        // Invert all magnitude bytes
        for b in mag_bytes {
            bytes.buffer_write_byte(&buf, ~b)
        }
    }

    return bytes.buffer_to_bytes(&buf)
}
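
A sketch of the ordering property this encoding exists for: plain byte-wise comparison of encoded values (here via `bytes.compare`) matches numeric order, including across signs.

```odin
sort_encoding_order_example :: proc() {
    neg, _  := parse_ddb_number("-3")
    zero, _ := parse_ddb_number("0")
    pos, _  := parse_ddb_number("2")
    big, _  := parse_ddb_number("10")

    e_neg  := encode_ddb_number_for_sort(neg)
    e_zero := encode_ddb_number_for_sort(zero)
    e_pos  := encode_ddb_number_for_sort(pos)
    e_big  := encode_ddb_number_for_sort(big)
    defer { delete(e_neg); delete(e_zero); delete(e_pos); delete(e_big) }

    assert(bytes.compare(e_neg, e_zero) < 0) // -3 < 0
    assert(bytes.compare(e_zero, e_pos) < 0) //  0 < 2
    assert(bytes.compare(e_pos, e_big) < 0)  //  2 < 10, despite "10" < "2" as strings
}
```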

// Encode the magnitude of a number (integer + fractional parts)
encode_magnitude :: proc(num: DDB_Number) -> []byte {
    buf: bytes.Buffer
    bytes.buffer_init_allocator(&buf, 0, 32, context.allocator)

    // Write length of integer part as varint
    int_len := u64(len(num.integer_part))
    encode_varint(&buf, int_len)

    // Write integer digits
    bytes.buffer_write_string(&buf, num.integer_part)

    // Write fractional digits if any
    if len(num.fractional_part) > 0 {
        bytes.buffer_write_string(&buf, num.fractional_part)
    }

    return bytes.buffer_to_bytes(&buf)
}

// Decode a canonically encoded number back to DDB_Number
decode_ddb_number_from_sort :: proc(data: []byte) -> (DDB_Number, bool) {
    if len(data) == 0 {
        return {}, false
    }

    marker := data[0]

    // Zero
    if marker == 0x80 {
        return DDB_Number{
            sign = true,
            integer_part = "0",
            fractional_part = "",
            exponent = 0,
        }, true
    }

    // Positive number
    if marker == 0x81 {
        return decode_magnitude(data[1:], true)
    }

    // Negative number (inverted bytes)
    if marker == 0x7F {
        // Un-invert the bytes
        inverted := make([]byte, len(data)-1)
        defer delete(inverted)
        for i in 0..<len(inverted) {
            inverted[i] = ~data[i+1]
        }
        return decode_magnitude(inverted, false)
    }

    return {}, false
}

// Decode magnitude bytes back to a DDB_Number
decode_magnitude :: proc(data: []byte, positive: bool) -> (DDB_Number, bool) {
    if len(data) == 0 {
        return {}, false
    }

    // Read integer length
    int_len, bytes_read := decode_varint(data)
    if bytes_read == 0 || int_len == 0 {
        return {}, false
    }

    offset := bytes_read

    // Read integer part
    if offset + int(int_len) > len(data) {
        return {}, false
    }
    int_part := string(data[offset:offset + int(int_len)])
    offset += int(int_len)

    // Read fractional part if any
    frac_part := ""
    if offset < len(data) {
        frac_part = string(data[offset:])
    }

    return DDB_Number{
        sign = positive,
        integer_part = int_part,
        fractional_part = frac_part,
        exponent = 0,
    }, true
}

// ============================================================================
// Decimal Arithmetic (38-digit precision, no float conversion)
// ============================================================================

MAX_DDB_PRECISION :: 38

// Add two DDB_Numbers with full decimal precision.
// Returns an owned DDB_Number.
add_ddb_numbers :: proc(a: DDB_Number, b: DDB_Number) -> (DDB_Number, bool) {
    if is_ddb_number_zero(a) { return clone_ddb_number(b), true }
    if is_ddb_number_zero(b) { return clone_ddb_number(a), true }

    if a.sign == b.sign {
        // Same sign: add magnitudes, keep sign
        result, ok := add_magnitudes(a, b)
        if !ok { return {}, false }
        result.sign = a.sign
        return result, true
    }

    // Different signs: subtract smaller magnitude from larger
    cmp := compare_ddb_number_magnitudes(a, b)
    if cmp == 0 {
        return DDB_Number{
            sign = true,
            integer_part = strings.clone("0"),
            fractional_part = strings.clone(""),
            exponent = 0,
        }, true
    }

    if cmp > 0 {
        result, ok := subtract_magnitudes(a, b)
        if !ok { return {}, false }
        result.sign = a.sign
        return result, true
    } else {
        result, ok := subtract_magnitudes(b, a)
        if !ok { return {}, false }
        result.sign = b.sign
        return result, true
    }
}

// Subtract two DDB_Numbers: a - b
subtract_ddb_numbers :: proc(a: DDB_Number, b: DDB_Number) -> (DDB_Number, bool) {
    neg_b := b
    neg_b.sign = !b.sign
    return add_ddb_numbers(a, neg_b)
}
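
A sketch of the decimal arithmetic path, assuming it runs in the same package: addition is exact (no binary-float round-off), and subtraction routes through sign handling and magnitude comparison.

```odin
arith_example :: proc() {
    a, _ := parse_ddb_number("0.1")
    b, _ := parse_ddb_number("0.2")
    sum, ok := add_ddb_numbers(a, b)
    assert(ok)
    defer { delete(sum.integer_part); delete(sum.fractional_part) }
    assert(format_ddb_number(sum) == "0.3") // exact, unlike f64's 0.30000000000000004

    c, _ := parse_ddb_number("1")
    diff, ok2 := subtract_ddb_numbers(b, c) // 0.2 - 1
    assert(ok2)
    defer { delete(diff.integer_part); delete(diff.fractional_part) }
    assert(format_ddb_number(diff) == "-0.8")
}
```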

// ============================================================================
// Internal arithmetic helpers
// ============================================================================

// Expand a DDB_Number to effective integer and fractional digit bytes
// with the exponent fully applied. Returns heap-allocated slices (caller frees).
@(private="file")
expand_digits :: proc(num: DDB_Number) -> (int_digits: []u8, frac_digits: []u8) {
    dp := len(num.integer_part) + int(num.exponent)
    all_len := len(num.integer_part) + len(num.fractional_part)

    if dp <= 0 {
        // Everything is fractional, need leading zeros
        frac := make([]u8, -dp + all_len)
        for i in 0..<(-dp) {
            frac[i] = '0'
        }
        for i in 0..<len(num.integer_part) {
            frac[-dp + i] = num.integer_part[i]
        }
        for i in 0..<len(num.fractional_part) {
            frac[-dp + len(num.integer_part) + i] = num.fractional_part[i]
        }

        int_d := make([]u8, 1)
        int_d[0] = '0'
        return int_d, frac
    }

    if dp >= all_len {
        // Everything is integer, may need trailing zeros
        int_d := make([]u8, dp)
        for i in 0..<len(num.integer_part) {
            int_d[i] = num.integer_part[i]
        }
        for i in 0..<len(num.fractional_part) {
            int_d[len(num.integer_part) + i] = num.fractional_part[i]
        }
        for i in all_len..<dp {
            int_d[i] = '0'
        }
        return int_d, nil
    }

    // Decimal point falls within the original integer_part
    if dp <= len(num.integer_part) {
        int_d := make([]u8, dp)
        for i in 0..<dp {
            int_d[i] = num.integer_part[i]
        }

        frac_len := (len(num.integer_part) - dp) + len(num.fractional_part)
        frac := make([]u8, frac_len)
        for i in dp..<len(num.integer_part) {
            frac[i - dp] = num.integer_part[i]
        }
        offset := len(num.integer_part) - dp
        for i in 0..<len(num.fractional_part) {
            frac[offset + i] = num.fractional_part[i]
        }
        return int_d, frac
    }

    // Decimal point falls within the original fractional_part
    frac_split := dp - len(num.integer_part)

    int_d := make([]u8, dp)
    for i in 0..<len(num.integer_part) {
        int_d[i] = num.integer_part[i]
    }
    for i in 0..<frac_split {
        int_d[len(num.integer_part) + i] = num.fractional_part[i]
    }

    remaining := len(num.fractional_part) - frac_split
    frac: []u8 = nil
    if remaining > 0 {
        frac = make([]u8, remaining)
        for i in frac_split..<len(num.fractional_part) {
            frac[i - frac_split] = num.fractional_part[i]
        }
    }
    return int_d, frac
}

// Normalize a DDB_Number that owns its strings.
// Clones the trimmed result, frees the originals.
@(private="file")
normalize_owned :: proc(num: DDB_Number) -> DDB_Number {
    norm := normalize_ddb_number(num)

    // Clone the normalized subslices BEFORE freeing originals
    new_int := strings.clone(norm.integer_part)
    new_frac := strings.clone(norm.fractional_part)

    // Free the originals
    delete(num.integer_part)
    delete(num.fractional_part)

    return DDB_Number{
        sign = norm.sign,
        integer_part = new_int,
        fractional_part = new_frac,
        exponent = norm.exponent,
    }
}

// Add absolute values. Returns owned DDB_Number (sign=true).
@(private="file")
add_magnitudes :: proc(a: DDB_Number, b: DDB_Number) -> (DDB_Number, bool) {
    a_int, a_frac := expand_digits(a)
    b_int, b_frac := expand_digits(b)
    defer { delete(a_int); delete(a_frac); delete(b_int); delete(b_frac) }

    max_int := max(len(a_int), len(b_int))
    max_frac := max(len(a_frac), len(b_frac))
    total := max_int + max_frac

    // Build zero-padded aligned arrays
    a_aligned := make([]u8, total)
    b_aligned := make([]u8, total)
    defer { delete(a_aligned); delete(b_aligned) }

    for i in 0..<total { a_aligned[i] = '0'; b_aligned[i] = '0' }

    // Integer digits: right-aligned in [0..max_int)
    a_off := max_int - len(a_int)
    b_off := max_int - len(b_int)
    for i in 0..<len(a_int) { a_aligned[a_off + i] = a_int[i] }
    for i in 0..<len(b_int) { b_aligned[b_off + i] = b_int[i] }

    // Fractional digits: left-aligned in [max_int..total)
    for i in 0..<len(a_frac) { a_aligned[max_int + i] = a_frac[i] }
    for i in 0..<len(b_frac) { b_aligned[max_int + i] = b_frac[i] }

    // Add right-to-left
    result := make([]u8, total + 1) // +1 for carry
    carry: u8 = 0
    for i := total - 1; i >= 0; i -= 1 {
        sum := (a_aligned[i] - '0') + (b_aligned[i] - '0') + carry
        result[i + 1] = (sum % 10) + '0'
        carry = sum / 10
    }
    result[0] = carry + '0'

    // Split: decimal point is at max_int + 1 (carry slot shifts everything)
    int_end := max_int + 1
    int_str := strings.clone(string(result[:int_end]))
    frac_str := strings.clone(string(result[int_end:]))
    delete(result)

    num := normalize_owned(DDB_Number{
        sign = true,
        integer_part = int_str,
        fractional_part = frac_str,
        exponent = 0,
    })

    if len(num.integer_part) + len(num.fractional_part) > MAX_DDB_PRECISION {
        delete(num.integer_part)
        delete(num.fractional_part)
        return {}, false
    }

    return num, true
}

// Subtract absolute values: |a| - |b|, where |a| >= |b|.
// Returns owned DDB_Number (sign=true).
@(private="file")
subtract_magnitudes :: proc(a: DDB_Number, b: DDB_Number) -> (DDB_Number, bool) {
    a_int, a_frac := expand_digits(a)
    b_int, b_frac := expand_digits(b)
    defer { delete(a_int); delete(a_frac); delete(b_int); delete(b_frac) }

    max_int := max(len(a_int), len(b_int))
    max_frac := max(len(a_frac), len(b_frac))
    total := max_int + max_frac

    a_aligned := make([]u8, total)
    b_aligned := make([]u8, total)
    defer { delete(a_aligned); delete(b_aligned) }

    for i in 0..<total { a_aligned[i] = '0'; b_aligned[i] = '0' }

    a_off := max_int - len(a_int)
    b_off := max_int - len(b_int)
    for i in 0..<len(a_int) { a_aligned[a_off + i] = a_int[i] }
    for i in 0..<len(b_int) { b_aligned[b_off + i] = b_int[i] }
    for i in 0..<len(a_frac) { a_aligned[max_int + i] = a_frac[i] }
    for i in 0..<len(b_frac) { b_aligned[max_int + i] = b_frac[i] }

    // Subtract right-to-left
    result := make([]u8, total)
    borrow: u8 = 0
    for i := total - 1; i >= 0; i -= 1 {
        ad := a_aligned[i] - '0'
        bd := (b_aligned[i] - '0') + borrow
        if ad < bd {
            ad += 10
            borrow = 1
        } else {
            borrow = 0
        }
        result[i] = (ad - bd) + '0'
    }

    int_str := strings.clone(string(result[:max_int]))
    frac_str := strings.clone(string(result[max_int:]))
    delete(result)

    if len(int_str) == 0 {
        delete(int_str)
        int_str = strings.clone("0")
    }

    num := normalize_owned(DDB_Number{
        sign = true,
        integer_part = int_str,
        fractional_part = frac_str,
        exponent = 0,
    })

    if len(num.integer_part) + len(num.fractional_part) > MAX_DDB_PRECISION {
        delete(num.integer_part)
        delete(num.fractional_part)
        return {}, false
    }

    return num, true
}

// Format a DDB_Number for display
format_ddb_number :: proc(num: DDB_Number) -> string {
    // Normalize first
    norm := normalize_ddb_number(num)

    // Check if it's effectively an integer
    if len(norm.fractional_part) == 0 && norm.exponent >= 0 {
        builder := strings.builder_make()
        if !norm.sign {
            strings.write_string(&builder, "-")
        }
        strings.write_string(&builder, norm.integer_part)
        // Add trailing zeros for positive exponent
        for _ in 0..<norm.exponent {
            strings.write_string(&builder, "0")
        }
        return strings.to_string(builder)
    }

    // Otherwise use full representation
    return ddb_number_to_string(norm)
}

// Clone a DDB_Number (deep-copies its strings)
clone_ddb_number :: proc(num: DDB_Number) -> DDB_Number {
    return DDB_Number{
        sign = num.sign,
        integer_part = strings.clone(num.integer_part),
        fractional_part = strings.clone(num.fractional_part),
        exponent = num.exponent,
    }
}

// Helper: encode_varint (LEB128-style variable-length unsigned integer encoding)
@(private="file")
encode_varint :: proc(buf: ^bytes.Buffer, value: u64) {
    v := value
    for {
        byte_val := u8(v & 0x7F)
        v >>= 7
        if v != 0 {
            byte_val |= 0x80
        }
        bytes.buffer_write_byte(buf, byte_val)
        if v == 0 {
            break
        }
    }
}

// Helper: decode_varint
@(private="file")
decode_varint :: proc(data: []byte) -> (value: u64, bytes_read: int) {
    shift: u64 = 0
    for i in 0..<len(data) {
        byte_val := data[i]
        value |= u64(byte_val & 0x7F) << shift
        bytes_read = i + 1
        if (byte_val & 0x80) == 0 {
            return
        }
        shift += 7
    }
    return 0, 0
}
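
A round-trip sketch for the varint helpers (7-bit groups, low bits first, 0x80 as the continuation bit). Since both procs are file-private, this would have to live in the same file.

```odin
varint_example :: proc() {
    buf: bytes.Buffer
    bytes.buffer_init_allocator(&buf, 0, 16, context.allocator)
    defer bytes.buffer_destroy(&buf)

    encode_varint(&buf, 300) // 300 = 0b10_0101100, encoded as [0xAC, 0x02]
    encoded := bytes.buffer_to_bytes(&buf)

    value, n := decode_varint(encoded)
    assert(value == 300 && n == 2)
}
```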

@@ -22,6 +22,9 @@ Storage_Error :: enum {
    Serialization_Error,
    RocksDB_Error,
    Out_Of_Memory,
+   Validation_Error,
+   Request_Too_Large,
+   Internal_Error,
}

// Result type for Scan operations with pagination
@@ -190,10 +193,14 @@ remove_table_lock :: proc(engine: ^Storage_Engine, table_name: string) {
    sync.mutex_lock(&engine.table_locks_mutex)
    defer sync.mutex_unlock(&engine.table_locks_mutex)

-   if lock, found := engine.table_locks[table_name]; found {
-       delete(table_name, engine.allocator)
-       free(lock, engine.allocator)
-       delete_key(&engine.table_locks, table_name)
+   // Find the actual heap-allocated key string from the map
+   for key, lock in engine.table_locks {
+       if key == table_name {
+           delete_key(&engine.table_locks, key)
+           delete(key, engine.allocator) // free the map's owned key!
+           free(lock, engine.allocator)
+           break
+       }
    }
}

@@ -240,7 +247,13 @@ serialize_table_metadata :: proc(metadata: ^Table_Metadata) -> ([]byte, bool) {

    // Add other metadata
    meta_item["TableStatus"] = String(strings.clone(table_status_to_string(metadata.table_status)))
-   meta_item["CreationDateTime"] = Number(fmt.aprint(metadata.creation_date_time))
+   ts_str := fmt.aprint(metadata.creation_date_time)
+   ts_num, ts_ok := parse_ddb_number(ts_str)
+   if ts_ok {
+       meta_item["CreationDateTime"] = ts_num
+   } else {
+       meta_item["CreationDateTime"] = String(strings.clone(ts_str))
+   }

    // Encode GSI definitions as JSON string
    if gsis, has_gsis := metadata.global_secondary_indexes.?; has_gsis && len(gsis) > 0 {
@@ -260,7 +273,7 @@ serialize_table_metadata :: proc(metadata: ^Table_Metadata) -> ([]byte, bool) {
            fmt.sbprintf(&gsi_builder, `{{"AttributeName":"%s","KeyType":"%s"}}`,
                ks.attribute_name, key_type_to_string(ks.key_type))
        }
-       strings.write_string(&gsi_builder, `],"Projection":{{"ProjectionType":"`)
+       strings.write_string(&gsi_builder, `],"Projection":{"ProjectionType":"`)
        switch gsi.projection.projection_type {
        case .ALL:       strings.write_string(&gsi_builder, "ALL")
        case .KEYS_ONLY: strings.write_string(&gsi_builder, "KEYS_ONLY")
@@ -311,8 +324,9 @@ deserialize_table_metadata :: proc(data: []byte, allocator: mem.Allocator) -> (T
    // Parse creation date time
    if time_val, found := meta_item["CreationDateTime"]; found {
        #partial switch v in time_val {
-       case Number:
-           val, parse_ok := strconv.parse_i64(string(v))
+       case DDB_Number:
+           num_str := format_ddb_number(v)
+           val, parse_ok := strconv.parse_i64(num_str)
            metadata.creation_date_time = val if parse_ok else 0
        }
    }
@@ -518,6 +532,10 @@ get_table_metadata :: proc(engine: ^Storage_Engine, table_name: string) -> (Tabl
        return {}, .Serialization_Error
    }

+   // table_name is not stored in the serialized blob (it IS the RocksDB key),
+   // so we populate it here from the argument we already have.
+   metadata.table_name = strings.clone(table_name, engine.allocator)

    return metadata, .None
}

@@ -729,6 +747,7 @@ delete_table :: proc(engine: ^Storage_Engine, table_name: string) -> Storage_Err
// ============================================================================

// Put item — uses EXCLUSIVE lock (write operation)
+// ATOMICITY: Uses WriteBatch to ensure base item + all GSI updates are atomic
put_item :: proc(engine: ^Storage_Engine, table_name: string, item: Item) -> Storage_Error {
    table_lock := get_or_create_table_lock(engine, table_name)
    sync.rw_mutex_lock(table_lock)
@@ -771,34 +790,67 @@ put_item :: proc(engine: ^Storage_Engine, table_name: string, item: Item) -> Sto
    storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
    defer delete(storage_key)

-   // --- GSI cleanup: delete OLD GSI entries if item already exists ---
+   // --- Check if item already exists (need old item for GSI cleanup) ---
+   old_item: Maybe(Item) = nil
    existing_value, existing_err := rocksdb.db_get(&engine.db, storage_key)
-   if existing_err == .None && existing_value != nil {
+   if existing_err == .NotFound {
+       // Item does not exist — nothing to clean up, proceed normally.
+   } else if existing_err != .None {
+       // Unexpected RocksDB I/O error — fail closed to avoid orphaned GSI entries.
+       return .RocksDB_Error
+   } else if existing_value != nil {
        defer delete(existing_value)
-       old_item, decode_ok := decode(existing_value)
-       if decode_ok {
-           defer item_destroy(&old_item)
-           gsi_delete_entries(engine, table_name, old_item, &metadata)
+       decoded_old, decode_ok := decode(existing_value)
+       if !decode_ok {
+           // Value exists but is unreadable — fail closed rather than leaving
+           // stale GSI entries behind after the overwrite.
+           return .Serialization_Error
        }
+       old_item = decoded_old
    }
+   // Cleanup old_item at the end
+   defer {
+       if old, has_old := old_item.?; has_old {
+           old_copy := old
+           item_destroy(&old_copy)
+       }
+   }

-   // Encode item
+   // Encode new item
    encoded_item, encode_ok := encode(item)
    if !encode_ok {
        return .Serialization_Error
    }
    defer delete(encoded_item)

-   // Store in RocksDB
-   put_err := rocksdb.db_put(&engine.db, storage_key, encoded_item)
-   if put_err != .None {
+   // --- ATOMIC WRITE BATCH: base item + all GSI updates ---
+   batch, batch_err := rocksdb.batch_create()
+   if batch_err != .None {
        return .RocksDB_Error
    }
+   defer rocksdb.batch_destroy(&batch)

-   // --- GSI maintenance: write NEW GSI entries ---
-   gsi_err := gsi_write_entries(engine, table_name, item, &metadata)
-   if gsi_err != .None {
-       return gsi_err
+   // Add base item write to batch
+   rocksdb.batch_put(&batch, storage_key, encoded_item)
+
+   // Add old GSI entry deletions to batch (if item existed)
+   if old, has_old := old_item.?; has_old {
+       gsi_del_err := gsi_batch_delete_entries(&batch, table_name, old, &metadata)
+       if gsi_del_err != .None {
+           return gsi_del_err
+       }
    }
+
+   // Add new GSI entry writes to batch
+   gsi_write_err := gsi_batch_write_entries(&batch, table_name, item, &metadata)
+   if gsi_write_err != .None {
+       return gsi_write_err
+   }
+
+   // Write batch atomically - ALL or NOTHING
+   write_err := rocksdb.batch_write(&engine.db, &batch)
+   if write_err != .None {
+       return .RocksDB_Error
+   }

    return .None
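
The same all-or-nothing pattern, reduced to its skeleton. The proc below is a hypothetical illustration; the `rocksdb.batch_*` calls are the ones used throughout this diff.

```odin
atomic_two_writes :: proc(engine: ^Storage_Engine, k1, v1, k2: []byte) -> Storage_Error {
    batch, err := rocksdb.batch_create()
    if err != .None do return .RocksDB_Error
    defer rocksdb.batch_destroy(&batch)

    rocksdb.batch_put(&batch, k1, v1) // stage a write
    rocksdb.batch_delete(&batch, k2)  // stage a delete

    // Nothing touches the DB until this point; both ops land or neither does.
    if rocksdb.batch_write(&engine.db, &batch) != .None do return .RocksDB_Error
    return .None
}
```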

@@ -861,6 +913,7 @@ get_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> (May
}

// Delete item — uses EXCLUSIVE lock (write operation)
+// ATOMICITY: Uses WriteBatch to ensure base item + all GSI deletions are atomic
delete_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> Storage_Error {
    table_lock := get_or_create_table_lock(engine, table_name)
    sync.rw_mutex_lock(table_lock)
@@ -897,20 +950,59 @@ delete_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> S
    storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
    defer delete(storage_key)

-   // --- GSI cleanup: read existing item to know which GSI entries to remove ---
+   // --- Read existing item to know which GSI entries to remove ---
+   old_item: Maybe(Item) = nil
    existing_value, existing_err := rocksdb.db_get(&engine.db, storage_key)
-   if existing_err == .None && existing_value != nil {
+   if existing_err == .NotFound {
+       // Item does not exist — nothing to delete (DynamoDB idempotent delete).
+       return .None
+   } else if existing_err != .None {
+       // Unexpected RocksDB I/O error — fail closed.
+       return .RocksDB_Error
+   } else if existing_value != nil {
        defer delete(existing_value)
-       old_item, decode_ok := decode(existing_value)
-       if decode_ok {
-           defer item_destroy(&old_item)
-           gsi_delete_entries(engine, table_name, old_item, &metadata)
+       decoded_old, decode_ok := decode(existing_value)
+       if !decode_ok {
+           // Value exists but is corrupt — fail closed rather than deleting the
+           // base item while leaving its GSI entries dangling.
+           return .Serialization_Error
        }
+       old_item = decoded_old
    }
+   // Cleanup old_item at the end
+   defer {
+       if old, has_old := old_item.?; has_old {
+           old_copy := old
+           item_destroy(&old_copy)
+       }
+   }

-   // Delete from RocksDB
-   del_err := rocksdb.db_delete(&engine.db, storage_key)
-   if del_err != .None {
+   // If item doesn't exist (existing_value was nil with no error), nothing to delete.
+   if _, has_old := old_item.?; !has_old {
+       return .None
+   }
+
+   // --- ATOMIC WRITE BATCH: base item deletion + all GSI deletions ---
+   batch, batch_err := rocksdb.batch_create()
+   if batch_err != .None {
        return .RocksDB_Error
    }
+   defer rocksdb.batch_destroy(&batch)
+
+   // Add base item delete to batch
+   rocksdb.batch_delete(&batch, storage_key)
+
+   // Add GSI entry deletions to batch
+   if old, has_old := old_item.?; has_old {
+       gsi_del_err := gsi_batch_delete_entries(&batch, table_name, old, &metadata)
+       if gsi_del_err != .None {
+           return gsi_del_err
+       }
+   }
+
+   // Write batch atomically - ALL or NOTHING
+   write_err := rocksdb.batch_write(&engine.db, &batch)
+   if write_err != .None {
+       return .RocksDB_Error
+   }

@@ -1166,6 +1258,43 @@ evaluate_sort_key_condition :: proc(item: Item, skc: ^Sort_Key_Condition) -> boo
        return false
    }

    // Use numeric comparison if both sides are DDB_Number
    item_num, item_is_num := attr.(DDB_Number)
    cond_num, cond_is_num := skc.value.(DDB_Number)

    if item_is_num && cond_is_num {
        cmp := compare_ddb_numbers(item_num, cond_num)

        switch skc.operator {
        case .EQ:
            return cmp == 0
        case .LT:
            return cmp < 0
        case .LE:
            return cmp <= 0
        case .GT:
            return cmp > 0
        case .GE:
            return cmp >= 0
        case .BETWEEN:
            if v2, has_v2 := skc.value2.?; has_v2 {
                upper_num, upper_ok := v2.(DDB_Number)
                if !upper_ok {
                    return false
                }
                cmp2 := compare_ddb_numbers(item_num, upper_num)
                return cmp >= 0 && cmp2 <= 0
            }
            return false
        case .BEGINS_WITH:
            // begins_with is not a valid operator for Number sort keys.
            // DynamoDB rejects this at validation time. Return false.
            return false
        }
        return false
    }

    // Fallback: string comparison for S/B types
    item_sk_str, ok1 := attr_value_to_string_for_compare(attr)
    if !ok1 {
        return false
@@ -1212,8 +1341,10 @@ attr_value_to_string_for_compare :: proc(attr: Attribute_Value) -> (string, bool
    #partial switch v in attr {
    case String:
        return string(v), true
-   case Number:
-       return string(v), true
+   case DDB_Number:
+       // Return formatted string for fallback string comparison
+       // (actual numeric comparison is handled in compare_attribute_values)
+       return format_ddb_number(v), true
    case Binary:
        return string(v), true
    }
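
An illustration (same-package assumption) of why sort-key comparison moved from raw strings to `DDB_Number`: lexicographic order breaks on numbers with different digit counts.

```odin
numeric_vs_lexicographic :: proc() {
    a, _ := parse_ddb_number("10")
    b, _ := parse_ddb_number("9")
    assert(compare_ddb_numbers(a, b) > 0) // numerically, 10 > 9
    assert("10" < "9")                    // as strings, the order flips
}
```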

@@ -1258,7 +1389,7 @@ validate_item_key_types :: proc(
    #partial switch _ in attr {
    case String:
        match = (et == .S)
-   case Number:
+   case DDB_Number:
        match = (et == .N)
    case Binary:
        match = (et == .B)

@@ -54,6 +54,10 @@ Cancellation_Reason :: struct {
}

transact_write_action_destroy :: proc(action: ^Transact_Write_Action) {
    delete(action.table_name)
    if ce, has := action.condition_expr.?; has {
        delete(ce)
    }
    if item, has := action.item.?; has {
        item_copy := item
        item_destroy(&item_copy)
@@ -124,13 +128,11 @@ transact_write_items :: proc(
        table_set[action.table_name] = true
    }

-   // Acquire exclusive locks on all tables in deterministic order
-   // to prevent deadlocks
    table_names := make([dynamic]string, allocator = context.temp_allocator)
    for name in table_set {
        append(&table_names, name)
    }
-   // Simple sort for deterministic lock ordering
+   // Sort for deterministic lock ordering
    for i := 0; i < len(table_names); i += 1 {
        for j := i + 1; j < len(table_names); j += 1 {
            if table_names[j] < table_names[i] {
@@ -146,17 +148,15 @@ transact_write_items :: proc(
        append(&locks, lock)
    }
    defer {
        // Release all locks in reverse order
        for i := len(locks) - 1; i >= 0; i -= 1 {
            sync.rw_mutex_unlock(locks[i])
        }
    }

-   // ---- Step 2: Pre-flight — fetch metadata and existing items, evaluate conditions ----
+   // ---- Step 2: Fetch metadata and evaluate conditions ----
    reasons := make([]Cancellation_Reason, len(actions))
    any_failed := false

    // Cache table metadata to avoid redundant lookups
    metadata_cache := make(map[string]Table_Metadata, allocator = context.temp_allocator)
    defer {
        for _, meta in metadata_cache {
@@ -166,7 +166,6 @@ transact_write_items :: proc(
    }

    for action, idx in actions {
-       // Get table metadata (cached)
        metadata: ^Table_Metadata
        if cached, found := &metadata_cache[action.table_name]; found {
            metadata = cached
@@ -184,12 +183,11 @@ transact_write_items :: proc(
            metadata = &metadata_cache[action.table_name]
        }

-       // Determine the key item for this action
        key_item: Item
        switch action.type {
        case .Put:
            if item, has := action.item.?; has {
-               key_item = item // For Put, key is extracted from the item
+               key_item = item
            } else {
                reasons[idx] = Cancellation_Reason{
                    code = "ValidationError",
@@ -211,9 +209,8 @@ transact_write_items :: proc(
            }
        }

-       // Evaluate ConditionExpression if present
+       // Evaluate ConditionExpression
        if cond_str, has_cond := action.condition_expr.?; has_cond {
-           // Fetch existing item
            existing_item, get_err := get_item_internal(engine, action.table_name, key_item, metadata)
            if get_err != .None && get_err != .Item_Not_Found {
                reasons[idx] = Cancellation_Reason{
@@ -230,7 +227,6 @@ transact_write_items :: proc(
                }
            }

-           // Parse and evaluate condition
            filter_node, parse_ok := parse_filter_expression(
                cond_str, action.expr_attr_names, action.expr_attr_values,
            )
@@ -263,13 +259,12 @@ transact_write_items :: proc(
            }
        }

        // ConditionCheck actions only validate — they don't mutate
        if action.type == .Condition_Check {
            reasons[idx] = Cancellation_Reason{code = "None"}
            continue
        }

-       // Validate key/item against schema
+       // Validate key/item
        switch action.type {
        case .Put:
            if item, has := action.item.?; has {
@@ -286,58 +281,136 @@
            }
        }
    case .Delete, .Update:
-       // Key validation happens during execution
+       // Key validation happens during batch building
    case .Condition_Check:
-       // Already handled above
+       // Already handled
    }

        reasons[idx] = Cancellation_Reason{code = "None"}
    }

    // ---- Step 3: If any condition failed, return cancellation ----
    if any_failed {
        result.cancellation_reasons = reasons
        return result, .Cancelled
    }

-   // ---- Step 4: Apply all mutations ----
-   for &action, idx in actions {
+   // ---- Step 3: Build atomic WriteBatch with all operations ----
+   batch, batch_err := rocksdb.batch_create()
+   if batch_err != .None {
+       result.cancellation_reasons = reasons
+       return result, .Internal_Error
+   }
+   defer rocksdb.batch_destroy(&batch)
+
+   // Read old items for GSI cleanup (must happen before batch write)
+   old_items := make([]Maybe(Item), len(actions), allocator = context.temp_allocator)
+   defer {
+       for old_item in old_items {
+           if old, has := old_item.?; has {
+               old_copy := old
+               item_destroy(&old_copy)
+           }
+       }
+   }
+
+   for action, idx in actions {
        if action.type == .Condition_Check {
            continue
        }

        metadata := &metadata_cache[action.table_name]

-       apply_err := transact_apply_action(engine, &action, metadata)
+       // Read old item if needed for GSI cleanup
+       key_item: Item
+       #partial switch action.type {
+       case .Put:
+           if item, has := action.item.?; has {
+               key_item = item
+           }
+       case .Delete, .Update:
+           if key, has := action.key.?; has {
+               key_item = key
+           }
+       }
+
+       existing, read_err := get_item_internal(engine, action.table_name, key_item, metadata)
+       #partial switch read_err {
+       case .None:
+           // Item found or not found — both fine.
+       case .RocksDB_Error, .Serialization_Error, .Internal_Error:
+           // Cannot safely determine old index keys — cancel the entire transaction.
+           reasons[idx] = Cancellation_Reason{
+               code = "InternalError",
+               message = "Failed to read existing item for index maintenance",
+           }
+           result.cancellation_reasons = reasons
+           return result, .Internal_Error
+       case .Missing_Key_Attribute, .Invalid_Key:
+           // The key we built from the action's own item/key should always be valid
+           // by this point (validated earlier), but treat defensively.
+           reasons[idx] = Cancellation_Reason{
+               code = "ValidationError",
+               message = "Invalid key when reading existing item",
+           }
+           result.cancellation_reasons = reasons
+           return result, .Internal_Error
+       case .Table_Not_Found, .Item_Not_Found, .Validation_Error:
+           // These should not be returned by get_item_internal, but handle
+           // defensively — treat as "item does not exist" and continue.
+       }
+       old_items[idx] = existing
+   }
+
+   // Add all operations to batch
+   for &action, idx in actions {
+       if action.type == .Condition_Check {
+           continue
+       }
+
+       metadata := &metadata_cache[action.table_name]
+       old_item := old_items[idx]
+
+       apply_err := transact_apply_action_batch(&batch, engine, &action, metadata, old_item)
        if apply_err != .None {
            // This shouldn't happen after pre-validation, but handle gracefully
            reasons[idx] = Cancellation_Reason{
                code = "InternalError",
-               message = "Failed to apply mutation",
+               message = "Failed to build mutation",
            }
-           // In a real impl we'd need to rollback. For now, report the failure.
            result.cancellation_reasons = reasons
            return result, .Internal_Error
        }
    }

+   // ---- Step 4: Write batch atomically (ALL or NOTHING) ----
+   write_err := rocksdb.batch_write(&engine.db, &batch)
+   if write_err != .None {
+       result.cancellation_reasons = reasons
+       return result, .Internal_Error
+   }
+
    delete(reasons)
    return result, .None
}

// Apply a single transact write action (called after all conditions have passed)
@(private = "file")
-transact_apply_action :: proc(
+transact_apply_action_batch :: proc(
+   batch: ^rocksdb.WriteBatch,
    engine: ^Storage_Engine,
    action: ^Transact_Write_Action,
    metadata: ^Table_Metadata,
+   old_item: Maybe(Item),
) -> Storage_Error {
    switch action.type {
    case .Put:
        if item, has := action.item.?; has {
-           return put_item_internal(engine, action.table_name, item, metadata)
+           return put_item_batch(batch, engine, action.table_name, item, metadata, old_item)
        }
        return .Invalid_Key

    case .Delete:
        if key, has := action.key.?; has {
-           return delete_item_internal(engine, action.table_name, key, metadata)
+           return delete_item_batch(batch, engine, action.table_name, key, metadata, old_item)
        }
        return .Invalid_Key

@@ -345,19 +418,177 @@ transact_apply_action :: proc(
        if key, has := action.key.?; has {
            if plan, has_plan := action.update_plan.?; has_plan {
                plan_copy := plan
-               _, _, err := update_item_internal(engine, action.table_name, key, &plan_copy, metadata)
-               return err
+               return update_item_batch(batch, engine, action.table_name, key, &plan_copy, metadata, old_item)
            }
            return .Invalid_Key
        }
        return .Invalid_Key

    case .Condition_Check:
-       return .None // No mutation
+       return .None
    }
    return .None
}

@(private = "file")
put_item_batch :: proc(
    batch: ^rocksdb.WriteBatch,
    engine: ^Storage_Engine,
    table_name: string,
    item: Item,
    metadata: ^Table_Metadata,
    old_item: Maybe(Item),
) -> Storage_Error {
    key_struct, key_ok := key_from_item(item, metadata.key_schema)
    if !key_ok {
        return .Missing_Key_Attribute
    }
    defer key_destroy(&key_struct)

    key_values, kv_ok := key_get_values(&key_struct)
    if !kv_ok {
        return .Invalid_Key
    }

    storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
    defer delete(storage_key)

    encoded_item, encode_ok := encode(item)
    if !encode_ok {
        return .Serialization_Error
    }
    defer delete(encoded_item)

    // Add base item to batch
    rocksdb.batch_put(batch, storage_key, encoded_item)

    // Add old GSI deletions to batch
    if old, has_old := old_item.?; has_old {
        gsi_del_err := gsi_batch_delete_entries(batch, table_name, old, metadata)
        if gsi_del_err != .None {
            return gsi_del_err
        }
    }

    // Add new GSI writes to batch
    gsi_write_err := gsi_batch_write_entries(batch, table_name, item, metadata)
    if gsi_write_err != .None {
        return gsi_write_err
    }

    return .None
}

// Add delete operation to batch (with GSI cleanup)
@(private = "file")
delete_item_batch :: proc(
    batch: ^rocksdb.WriteBatch,
    engine: ^Storage_Engine,
    table_name: string,
    key: Item,
    metadata: ^Table_Metadata,
    old_item: Maybe(Item),
) -> Storage_Error {
    key_struct, key_ok := key_from_item(key, metadata.key_schema)
    if !key_ok {
        return .Missing_Key_Attribute
    }
    defer key_destroy(&key_struct)

    key_values, kv_ok := key_get_values(&key_struct)
    if !kv_ok {
        return .Invalid_Key
    }

    storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
    defer delete(storage_key)

    // Add base item delete to batch
    rocksdb.batch_delete(batch, storage_key)

    // Add GSI deletions to batch
    if old, has_old := old_item.?; has_old {
        gsi_del_err := gsi_batch_delete_entries(batch, table_name, old, metadata)
        if gsi_del_err != .None {
            return gsi_del_err
        }
    }

    return .None
}

// Add update operation to batch (with GSI maintenance)
@(private = "file")
update_item_batch :: proc(
    batch: ^rocksdb.WriteBatch,
    engine: ^Storage_Engine,
    table_name: string,
    key_item: Item,
    plan: ^Update_Plan,
    metadata: ^Table_Metadata,
    old_item_pre: Maybe(Item),
) -> Storage_Error {
    key_struct, key_ok := key_from_item(key_item, metadata.key_schema)
    if !key_ok {
        return .Missing_Key_Attribute
    }
    defer key_destroy(&key_struct)

    key_values, kv_ok := key_get_values(&key_struct)
    if !kv_ok {
        return .Invalid_Key
    }

    storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
    defer delete(storage_key)

    // Start with existing item or create new
    existing_item: Item
    if old, has_old := old_item_pre.?; has_old {
        existing_item = item_deep_copy(old)
    } else {
        existing_item = make(Item)
        for ks in metadata.key_schema {
            if val, found := key_item[ks.attribute_name]; found {
                existing_item[strings.clone(ks.attribute_name)] = attr_value_deep_copy(val)
            }
        }
    }
    defer item_destroy(&existing_item)

    // Apply update plan.
    if exec_err := execute_update_plan(&existing_item, plan); exec_err != .None {
        return .Validation_Error
    }

    // Encode updated item
    encoded_item, encode_ok := encode(existing_item)
    if !encode_ok {
        return .Serialization_Error
    }
    defer delete(encoded_item)

    // Add base item to batch
    rocksdb.batch_put(batch, storage_key, encoded_item)

    // Add old GSI deletions to batch
    if old, has_old := old_item_pre.?; has_old {
        gsi_del_err := gsi_batch_delete_entries(batch, table_name, old, metadata)
        if gsi_del_err != .None {
            return gsi_del_err
        }
    }

    // Add new GSI writes to batch
    gsi_write_err := gsi_batch_write_entries(batch, table_name, existing_item, metadata)
    if gsi_write_err != .None {
        return gsi_write_err
    }

    return .None
}
|
||||
|
||||
|
||||
// ============================================================================
|
||||
// Internal storage operations that skip lock acquisition
|
||||
// (Used by transact_write_items which manages its own locking)
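Before the raw hunks below, a minimal sketch of how these pieces are meant to compose: transact_write_items can stage every operation through the file-private *_item_batch helpers above into a single WriteBatch and commit once. The Sketch_Op type is hypothetical, and put_item_batch's parameter order is assumed to mirror delete_item_batch; only the rocksdb.batch_* calls are taken from this diff.

```odin
// Hypothetical sketch, not the real transact_write_items: stage every
// operation into one WriteBatch, then commit once.
Sketch_Op_Kind :: enum { Put, Delete }

Sketch_Op :: struct {
    kind:       Sketch_Op_Kind,
    table_name: string,
    item:       Item,        // the item for Put, the key for Delete
    metadata:   ^Table_Metadata,
    old_item:   Maybe(Item), // pre-image, needed for GSI cleanup
}

transact_write_sketch :: proc(engine: ^Storage_Engine, ops: []Sketch_Op) -> Storage_Error {
    batch, batch_err := rocksdb.batch_create()
    if batch_err != .None {
        return .RocksDB_Error
    }
    defer rocksdb.batch_destroy(&batch)

    // Stage all operations; nothing touches the DB yet.
    for op in ops {
        err: Storage_Error
        switch op.kind {
        case .Put:
            err = put_item_batch(&batch, engine, op.table_name, op.item, op.metadata, op.old_item)
        case .Delete:
            err = delete_item_batch(&batch, engine, op.table_name, op.item, op.metadata, op.old_item)
        }
        if err != .None {
            return err // nothing was written; the batch is simply dropped
        }
    }

    // One atomic commit: all staged puts and deletes apply, or none do.
    if rocksdb.batch_write(&engine.db, &batch) != .None {
        return .RocksDB_Error
    }
    return .None
}
```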
@@ -400,146 +631,6 @@ get_item_internal :: proc(
    return item, .None
}

put_item_internal :: proc(
    engine: ^Storage_Engine,
    table_name: string,
    item: Item,
    metadata: ^Table_Metadata,
) -> Storage_Error {
    key_struct, key_ok := key_from_item(item, metadata.key_schema)
    if !key_ok {
        return .Missing_Key_Attribute
    }
    defer key_destroy(&key_struct)

    key_values, kv_ok := key_get_values(&key_struct)
    if !kv_ok {
        return .Invalid_Key
    }

    storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
    defer delete(storage_key)

    encoded_item, encode_ok := encode(item)
    if !encode_ok {
        return .Serialization_Error
    }
    defer delete(encoded_item)

    put_err := rocksdb.db_put(&engine.db, storage_key, encoded_item)
    if put_err != .None {
        return .RocksDB_Error
    }

    return .None
}

delete_item_internal :: proc(
    engine: ^Storage_Engine,
    table_name: string,
    key: Item,
    metadata: ^Table_Metadata,
) -> Storage_Error {
    key_struct, key_ok := key_from_item(key, metadata.key_schema)
    if !key_ok {
        return .Missing_Key_Attribute
    }
    defer key_destroy(&key_struct)

    key_values, kv_ok := key_get_values(&key_struct)
    if !kv_ok {
        return .Invalid_Key
    }

    storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
    defer delete(storage_key)

    del_err := rocksdb.db_delete(&engine.db, storage_key)
    if del_err != .None {
        return .RocksDB_Error
    }

    return .None
}

update_item_internal :: proc(
    engine: ^Storage_Engine,
    table_name: string,
    key_item: Item,
    plan: ^Update_Plan,
    metadata: ^Table_Metadata,
) -> (old_item: Maybe(Item), new_item: Maybe(Item), err: Storage_Error) {
    key_struct, key_ok := key_from_item(key_item, metadata.key_schema)
    if !key_ok {
        return nil, nil, .Missing_Key_Attribute
    }
    defer key_destroy(&key_struct)

    key_values, kv_ok := key_get_values(&key_struct)
    if !kv_ok {
        return nil, nil, .Invalid_Key
    }

    storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
    defer delete(storage_key)

    // Fetch existing item
    existing_encoded, get_err := rocksdb.db_get(&engine.db, storage_key)
    existing_item: Item

    if get_err == .None && existing_encoded != nil {
        defer delete(existing_encoded)
        decoded, decode_ok := decode(existing_encoded)
        if !decode_ok {
            return nil, nil, .Serialization_Error
        }
        existing_item = decoded
        old_item = item_deep_copy(existing_item)
    } else if get_err == .NotFound || existing_encoded == nil {
        existing_item = make(Item)
        for ks in metadata.key_schema {
            if val, found := key_item[ks.attribute_name]; found {
                existing_item[strings.clone(ks.attribute_name)] = attr_value_deep_copy(val)
            }
        }
    } else {
        return nil, nil, .RocksDB_Error
    }

    if !execute_update_plan(&existing_item, plan) {
        item_destroy(&existing_item)
        if old, has := old_item.?; has {
            old_copy := old
            item_destroy(&old_copy)
        }
        return nil, nil, .Invalid_Key
    }

    encoded_item, encode_ok := encode(existing_item)
    if !encode_ok {
        item_destroy(&existing_item)
        if old, has := old_item.?; has {
            old_copy := old
            item_destroy(&old_copy)
        }
        return nil, nil, .Serialization_Error
    }
    defer delete(encoded_item)

    put_err := rocksdb.db_put(&engine.db, storage_key, encoded_item)
    if put_err != .None {
        item_destroy(&existing_item)
        if old, has := old_item.?; has {
            old_copy := old
            item_destroy(&old_copy)
        }
        return nil, nil, .RocksDB_Error
    }

    new_item = existing_item
    return old_item, new_item, .None
}

// ============================================================================
// TransactGetItems Types
// ============================================================================
@@ -555,8 +646,12 @@ Transact_Get_Result :: struct {
}

transact_get_action_destroy :: proc(action: ^Transact_Get_Action) {
    delete(action.table_name)
    item_destroy(&action.key)
    if proj, has := action.projection.?; has {
        for path in proj {
            delete(path)
        }
        delete(proj)
    }
}

@@ -5,26 +5,25 @@ import "core:strings"

// DynamoDB AttributeValue - the core data type
Attribute_Value :: union {
    String, // S
    Number, // N (stored as string)
    Binary, // B (base64)
    Bool, // BOOL
    Null, // NULL
    String_Set, // SS
    Number_Set, // NS
    Binary_Set, // BS
    List, // L
    Map, // M
    String, // S
    DDB_Number, // N — decimal-preserving numeric type
    Binary, // B (base64)
    Bool, // BOOL
    Null, // NULL
    String_Set, // SS
    DDB_Number_Set, // NS
    Binary_Set, // BS
    List, // L
    Map, // M
}

String :: distinct string
Number :: distinct string
Binary :: distinct string
Bool :: distinct bool
Null :: distinct bool

String_Set :: distinct []string
Number_Set :: distinct []string
DDB_Number_Set :: distinct []DDB_Number
Binary_Set :: distinct []string
List :: distinct []Attribute_Value
Map :: distinct map[string]Attribute_Value
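DDB_Number itself is defined outside this hunk. What the diff does pin down, via attr_value_destroy further below, is that it owns at least an integer_part and a fractional_part string. A sketch of the implied shape; the negative field is a guess at how sign might be carried:

```odin
// Sketch only: the real DDB_Number is defined elsewhere in the package.
// integer_part/fractional_part are implied by attr_value_destroy below;
// `negative` is a hypothetical sign field.
DDB_Number_Sketch :: struct {
    negative:        bool,   // hypothetical
    integer_part:    string, // "12345" for 12345.67
    fractional_part: string, // "67"   for 12345.67
}
```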
@@ -60,7 +59,7 @@ key_from_item :: proc(item: Item, key_schema: []Key_Schema_Element) -> (Key, boo

    // Validate that key is a scalar type (S, N, or B)
    #partial switch _ in attr {
    case String, Number, Binary:
    case String, DDB_Number, Binary:
        // Valid key type
    case:
        return {}, false
@@ -116,12 +115,11 @@ key_get_values :: proc(key: ^Key) -> (Key_Values, bool) {
    #partial switch v in key.pk {
    case String:
        pk_bytes = transmute([]byte)string(v)
    case Number:
        pk_bytes = transmute([]byte)string(v)
    case DDB_Number:
        pk_bytes = encode_ddb_number_for_sort(v)
    case Binary:
        pk_bytes = transmute([]byte)string(v)
    case:
        // Keys should only be scalar types (S, N, or B)
        return {}, false
    }

@@ -130,12 +128,11 @@ key_get_values :: proc(key: ^Key) -> (Key_Values, bool) {
    #partial switch v in sk {
    case String:
        sk_bytes = transmute([]byte)string(v)
    case Number:
        sk_bytes = transmute([]byte)string(v)
    case DDB_Number:
        sk_bytes = encode_ddb_number_for_sort(v)
    case Binary:
        sk_bytes = transmute([]byte)string(v)
    case:
        // Keys should only be scalar types
        return {}, false
    }
}
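encode_ddb_number_for_sort is defined elsewhere, but the reason it exists is visible here: RocksDB compares key bytes bytewise, and raw decimal strings do not sort numerically. A two-line demonstration of the mismatch (any sign/exponent/digits encoding scheme you might imagine behind the function is an assumption, not something this diff shows):

```odin
package sort_order_demo

import "core:fmt"

main :: proc() {
    // Bytewise string order disagrees with numeric order, which is why
    // numeric keys need a canonical, order-preserving byte encoding.
    fmt.println("9" > "10") // true  (lexicographic)
    fmt.println(9 > 10)     // false (numeric)
}
```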
@@ -366,13 +363,27 @@ error_to_response :: proc(err_type: DynamoDB_Error_Type, message: string) -> str

// Build an Attribute_Value with the correct scalar type from raw bytes
build_attribute_value_with_type :: proc(raw_bytes: []byte, attr_type: Scalar_Attribute_Type) -> Attribute_Value {
    owned := strings.clone(string(raw_bytes))
    switch attr_type {
    case .S: return String(owned)
    case .N: return Number(owned)
    case .B: return Binary(owned)
    case .S:
        return String(strings.clone(string(raw_bytes)))
    case .N:
        // Key bytes are canonical-encoded via encode_ddb_number_for_sort.
        // Decode them back to a DDB_Number.
        ddb_num, ok := decode_ddb_number_from_sort(raw_bytes)
        if ok {
            return clone_ddb_number(ddb_num)
        }
        // Fallback: try interpreting as a plain numeric string
        fallback_num, fb_ok := parse_ddb_number(string(raw_bytes))
        if fb_ok {
            return fallback_num
        }
        // Last resort — return as string (shouldn't happen)
        return String(strings.clone(string(raw_bytes)))
    case .B:
        return Binary(strings.clone(string(raw_bytes)))
    }
    return String(owned)
    return String(strings.clone(string(raw_bytes)))
}

// Deep copy an attribute value
@@ -380,8 +391,8 @@ attr_value_deep_copy :: proc(attr: Attribute_Value) -> Attribute_Value {
    switch v in attr {
    case String:
        return String(strings.clone(string(v)))
    case Number:
        return Number(strings.clone(string(v)))
    case DDB_Number:
        return clone_ddb_number(v)
    case Binary:
        return Binary(strings.clone(string(v)))
    case Bool:
@@ -394,12 +405,12 @@ attr_value_deep_copy :: proc(attr: Attribute_Value) -> Attribute_Value {
            ss[i] = strings.clone(s)
        }
        return String_Set(ss)
    case Number_Set:
        ns := make([]string, len(v))
        for n, i in v {
            ns[i] = strings.clone(n)
    case DDB_Number_Set:
        ddb_ns := make([]DDB_Number, len(v))
        for num, i in v {
            ddb_ns[i] = clone_ddb_number(num)
        }
        return Number_Set(ns)
        return DDB_Number_Set(ddb_ns)
    case Binary_Set:
        bs := make([]string, len(v))
        for b, i in v {
@@ -427,8 +438,9 @@ attr_value_destroy :: proc(attr: ^Attribute_Value) {
    switch v in attr {
    case String:
        delete(string(v))
    case Number:
        delete(string(v))
    case DDB_Number:
        delete(v.integer_part)
        delete(v.fractional_part)
    case Binary:
        delete(string(v))
    case String_Set:
@@ -437,12 +449,12 @@ attr_value_destroy :: proc(attr: ^Attribute_Value) {
        }
        slice := v
        delete(slice)
    case Number_Set:
        for n in v {
            delete(n)
    case DDB_Number_Set:
        for num in v {
            delete(num.integer_part)
            delete(num.fractional_part)
        }
        slice := v
        delete(slice)
        delete(v)
    case Binary_Set:
        for b in v {
            delete(b)

@@ -13,8 +13,6 @@
package dynamodb

import "core:encoding/json"
import "core:fmt"
import "core:strconv"
import "core:strings"

// ============================================================================
@@ -596,7 +594,23 @@ is_clause_keyword :: proc(tok: string) -> bool {
// Execute Update Plan — apply mutations to an Item (in-place)
// ============================================================================

execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
// Reasons an update plan can fail at execution time.
// All of these map to ValidationException at the HTTP layer.
Update_Exec_Error :: enum {
    None,
    // SET x = source +/- val: source attribute does not exist in the item
    Operand_Not_Found,
    // SET x = source +/- val: source or value attribute is not a Number
    Operand_Not_Number,
    // SET x = list_append(source, val): source attribute is not a List
    Operand_Not_List,
    // ADD path val: existing attribute is not a Number, String_Set, or Number_Set
    Add_Type_Mismatch,
    // ADD path val: value type does not match the existing set type
    Add_Value_Type_Mismatch,
}

execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> Update_Exec_Error {
    // Execute SET actions
    for &action in plan.sets {
        switch action.value_kind {
@@ -615,11 +629,11 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
            if src, found := item[action.source]; found {
                existing = src
            } else {
                return false // source attribute not found
                return .Operand_Not_Found
            }
            result, add_ok := numeric_add(existing, action.value)
            if !add_ok {
                return false
                return .Operand_Not_Number
            }
            if old, found := item[action.path]; found {
                old_copy := old
@@ -634,11 +648,11 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
            if src, found := item[action.source]; found {
                existing = src
            } else {
                return false
                return .Operand_Not_Found
            }
            result, sub_ok := numeric_subtract(existing, action.value)
            if !sub_ok {
                return false
                return .Operand_Not_Number
            }
            if old, found := item[action.path]; found {
                old_copy := old
@@ -666,7 +680,7 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
                if l, is_list := src.(List); is_list {
                    existing_list = ([]Attribute_Value)(l)
                } else {
                    return false
                    return .Operand_Not_List
                }
            } else {
                existing_list = {}
@@ -676,7 +690,7 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
            if l, is_list := action.value.(List); is_list {
                append_list = ([]Attribute_Value)(l)
            } else {
                return false
                return .Operand_Not_List
            }

            new_list := make([]Attribute_Value, len(existing_list) + len(append_list))
@@ -710,10 +724,10 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
        if existing, found := item[action.path]; found {
            // If existing is a number, add numerically
            #partial switch v in existing {
            case Number:
            case DDB_Number:
                result, add_ok := numeric_add(existing, action.value)
                if !add_ok {
                    return false
                    return .Operand_Not_Number
                }
                old_copy := existing
                attr_value_destroy(&old_copy)
@@ -729,22 +743,22 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
                    delete_key(item, action.path)
                    item[strings.clone(action.path)] = String_Set(merged)
                } else {
                    return false
                    return .Add_Value_Type_Mismatch
                }

            case Number_Set:
                if new_ns, is_ns := action.value.(Number_Set); is_ns {
                    merged := set_union_strings(([]string)(v), ([]string)(new_ns))
            case DDB_Number_Set:
                if new_ns, is_ns := action.value.(DDB_Number_Set); is_ns {
                    merged := set_union_ddb_numbers(([]DDB_Number)(v), ([]DDB_Number)(new_ns))
                    old_copy := existing
                    attr_value_destroy(&old_copy)
                    delete_key(item, action.path)
                    item[strings.clone(action.path)] = Number_Set(merged)
                    item[strings.clone(action.path)] = DDB_Number_Set(merged)
                } else {
                    return false
                    return .Add_Value_Type_Mismatch
                }

            case:
                return false
                return .Add_Type_Mismatch
            }
        } else {
            // Attribute doesn't exist — create it
@@ -769,14 +783,14 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
                }
            }

            case Number_Set:
                if del_ns, is_ns := action.value.(Number_Set); is_ns {
                    result := set_difference_strings(([]string)(v), ([]string)(del_ns))
            case DDB_Number_Set:
                if del_ns, is_ns := action.value.(DDB_Number_Set); is_ns {
                    result := set_difference_ddb_numbers(([]DDB_Number)(v), ([]DDB_Number)(del_ns))
                    old_copy := existing
                    attr_value_destroy(&old_copy)
                    delete_key(item, action.path)
                    if len(result) > 0 {
                        item[strings.clone(action.path)] = Number_Set(result)
                        item[strings.clone(action.path)] = DDB_Number_Set(result)
                    } else {
                        delete(result)
                    }
@@ -788,7 +802,7 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
        }
    }

    return true
    return .None
}
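Per the enum's doc comment, every Update_Exec_Error maps to ValidationException at the HTTP layer. A hedged sketch of that mapping; the message strings are illustrative, not DynamoDB-exact:

```odin
// Sketch: translate an execution error into a client-facing message.
// The wording here is illustrative only.
update_exec_error_message :: proc(err: Update_Exec_Error) -> string {
    switch err {
    case .None:
        return ""
    case .Operand_Not_Found:
        return "The provided expression refers to an attribute that does not exist in the item"
    case .Operand_Not_Number:
        return "Incorrect operand type for operator or function; expected: N"
    case .Operand_Not_List:
        return "Incorrect operand type for function list_append; expected: L"
    case .Add_Type_Mismatch:
        return "ADD is only supported for Number and Set attribute types"
    case .Add_Value_Type_Mismatch:
        return "Operand type does not match the existing set's element type"
    }
    return ""
}
```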

// ============================================================================
@@ -796,48 +810,31 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> bool {
// ============================================================================

numeric_add :: proc(a: Attribute_Value, b: Attribute_Value) -> (Attribute_Value, bool) {
    a_num, a_ok := a.(Number)
    b_num, b_ok := b.(Number)
    a_num, a_ok := a.(DDB_Number)
    b_num, b_ok := b.(DDB_Number)
    if !a_ok || !b_ok {
        return nil, false
    }

    a_val, a_parse := strconv.parse_f64(string(a_num))
    b_val, b_parse := strconv.parse_f64(string(b_num))
    if !a_parse || !b_parse {
    result, result_ok := add_ddb_numbers(a_num, b_num)
    if !result_ok {
        return nil, false
    }

    result := a_val + b_val
    result_str := format_number(result)
    return Number(result_str), true
    return result, true
}

numeric_subtract :: proc(a: Attribute_Value, b: Attribute_Value) -> (Attribute_Value, bool) {
    a_num, a_ok := a.(Number)
    b_num, b_ok := b.(Number)
    a_num, a_ok := a.(DDB_Number)
    b_num, b_ok := b.(DDB_Number)
    if !a_ok || !b_ok {
        return nil, false
    }

    a_val, a_parse := strconv.parse_f64(string(a_num))
    b_val, b_parse := strconv.parse_f64(string(b_num))
    if !a_parse || !b_parse {
    result, result_ok := subtract_ddb_numbers(a_num, b_num)
    if !result_ok {
        return nil, false
    }

    result := a_val - b_val
    result_str := format_number(result)
    return Number(result_str), true
}

format_number :: proc(val: f64) -> string {
    // If it's an integer, format without decimal point
    int_val := i64(val)
    if f64(int_val) == val {
        return fmt.aprintf("%d", int_val)
    }
    return fmt.aprintf("%g", val)
    return result, true
}
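The point of replacing the parse_f64 path is precision: f64 holds 53 bits of mantissa, while DynamoDB numbers carry up to 38 significant decimal digits. A quick demonstration of the failure mode the decimal add_ddb_numbers/subtract_ddb_numbers routines (defined elsewhere) avoid:

```odin
package f64_precision_demo

import "core:fmt"
import "core:strconv"

main :: proc() {
    // 2^53 + 1 is not representable as f64; the old float path would
    // have computed with the wrong value here.
    a, ok := strconv.parse_f64("9007199254740993") // 2^53 + 1
    fmt.println(ok)                      // true: it parses...
    fmt.println(a == 9007199254740992.0) // true: ...but the +1 was lost
}
```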

// ============================================================================
@@ -877,6 +874,52 @@ set_difference_strings :: proc(a: []string, b: []string) -> []string {
    return result[:]
}

// Union of two DDB_Number slices (dedup by numeric equality)
set_union_ddb_numbers :: proc(a: []DDB_Number, b: []DDB_Number) -> []DDB_Number {
    result := make([dynamic]DDB_Number)

    // Add all from a
    for num in a {
        append(&result, clone_ddb_number(num))
    }

    // Add from b if not already present
    for num in b {
        found := false
        for existing in result {
            if compare_ddb_numbers(existing, num) == 0 {
                found = true
                break
            }
        }
        if !found {
            append(&result, clone_ddb_number(num))
        }
    }

    return result[:]
}

// Difference: elements in a that are NOT in b
set_difference_ddb_numbers :: proc(a: []DDB_Number, b: []DDB_Number) -> []DDB_Number {
    result := make([dynamic]DDB_Number)

    for num in a {
        in_b := false
        for del in b {
            if compare_ddb_numbers(num, del) == 0 {
                in_b = true
                break
            }
        }
        if !in_b {
            append(&result, clone_ddb_number(num))
        }
    }

    return result[:]
}
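compare_ddb_numbers is defined elsewhere; assuming it compares by numeric value rather than text, these helpers treat different spellings of the same number as one element, so adding "1.0" to a set that already contains "1" is a no-op. A small usage sketch (the cleanup mirrors the DDB_Number case of attr_value_destroy above):

```odin
// Usage sketch: union and difference with numeric dedup.
// nums_a and nums_b stand in for already-parsed DDB_Number slices.
set_ops_example :: proc(nums_a, nums_b: []DDB_Number) {
    merged := set_union_ddb_numbers(nums_a, nums_b) // "1" and "1.0" collapse
    for n in merged {
        delete(n.integer_part)
        delete(n.fractional_part)
    }
    delete(merged)

    only_a := set_difference_ddb_numbers(nums_a, nums_b) // a minus b, numerically
    for n in only_a {
        delete(n.integer_part)
        delete(n.fractional_part)
    }
    delete(only_a)
}
```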

// ============================================================================
// Request Parsing Helper
// ============================================================================
@@ -903,7 +946,7 @@ parse_update_expression_string :: proc(request_body: []byte) -> (expr: string, o
        return
    }

    expr = string(ue_str)
    expr = strings.clone(string(ue_str))
    ok = true
    return
}
@@ -912,24 +955,24 @@ parse_update_expression_string :: proc(request_body: []byte) -> (expr: string, o
parse_return_values :: proc(request_body: []byte) -> string {
    data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
    if parse_err != nil {
        return "NONE"
        return strings.clone("NONE")
    }
    defer json.destroy_value(data)

    root, root_ok := data.(json.Object)
    if !root_ok {
        return "NONE"
        return strings.clone("NONE")
    }

    rv_val, found := root["ReturnValues"]
    if !found {
        return "NONE"
        return strings.clone("NONE")
    }

    rv_str, str_ok := rv_val.(json.String)
    if !str_ok {
        return "NONE"
        return strings.clone("NONE")
    }

    return string(rv_str)
    return strings.clone(string(rv_str))
}
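Cloning even the constant "NONE" buys a single ownership rule: whatever parse_return_values returns, the caller frees it, with no branch on where the string came from. The call-site pattern, excerpted from handle_update_item later in this diff:

```odin
return_values := dynamodb.parse_return_values(request.body)
defer delete(return_values) // always safe: the result is always heap-owned
```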

@@ -1,5 +1,3 @@
// update_item.odin — Storage layer UpdateItem operation
// This file lives in the dynamodb/ package alongside storage.odin
package dynamodb

import "core:strings"
@@ -8,6 +6,7 @@ import "../rocksdb"

// UpdateItem — fetch existing item, apply update plan, write back
// Uses EXCLUSIVE lock (write operation)
// ATOMICITY: Uses WriteBatch to ensure base item + all GSI updates are atomic
//
// Returns:
//   - old_item: the item BEFORE mutations (if it existed), for ReturnValues
@@ -59,7 +58,7 @@ update_item :: proc(
            return nil, nil, .Serialization_Error
        }
        existing_item = decoded
        // Save old item for ReturnValues
        // Save old item for ReturnValues (and for GSI cleanup)
        old_item = item_deep_copy(existing_item)
    } else if get_err == .NotFound || existing_encoded == nil {
        // Item doesn't exist yet — start with just the key attributes
@@ -74,14 +73,14 @@ update_item :: proc(
        return nil, nil, .RocksDB_Error
    }

    // Apply update plan
    if !execute_update_plan(&existing_item, plan) {
    // Apply update plan.
    if exec_err := execute_update_plan(&existing_item, plan); exec_err != .None {
        item_destroy(&existing_item)
        if old, has := old_item.?; has {
            old_copy := old
            item_destroy(&old_copy)
        }
        return nil, nil, .Invalid_Key
        return nil, nil, .Validation_Error
    }

    // Validate key attributes are still present and correct type
@@ -109,9 +108,46 @@ update_item :: proc(
    }
    defer delete(encoded_item)

    // Write back to RocksDB
    put_err := rocksdb.db_put(&engine.db, storage_key, encoded_item)
    if put_err != .None {
    // --- ATOMIC WRITE BATCH: base item + all GSI updates ---
    batch, batch_err := rocksdb.batch_create()
    if batch_err != .None {
        item_destroy(&existing_item)
        if old, has := old_item.?; has {
            old_copy := old
            item_destroy(&old_copy)
        }
        return nil, nil, .RocksDB_Error
    }
    defer rocksdb.batch_destroy(&batch)

    // Add base item write to batch
    rocksdb.batch_put(&batch, storage_key, encoded_item)

    // Add old GSI entry deletions to batch (if item existed before)
    if old, has := old_item.?; has {
        gsi_del_err := gsi_batch_delete_entries(&batch, table_name, old, &metadata)
        if gsi_del_err != .None {
            item_destroy(&existing_item)
            old_copy := old
            item_destroy(&old_copy)
            return nil, nil, gsi_del_err
        }
    }

    // Add new GSI entry writes to batch
    gsi_write_err := gsi_batch_write_entries(&batch, table_name, existing_item, &metadata)
    if gsi_write_err != .None {
        item_destroy(&existing_item)
        if old, has := old_item.?; has {
            old_copy := old
            item_destroy(&old_copy)
        }
        return nil, nil, gsi_write_err
    }

    // Write batch atomically - ALL or NOTHING
    write_err := rocksdb.batch_write(&engine.db, &batch)
    if write_err != .None {
        item_destroy(&existing_item)
        if old, has := old_item.?; has {
            old_copy := old
@@ -120,12 +156,6 @@ update_item :: proc(
        return nil, nil, .RocksDB_Error
    }

    // --- GSI maintenance: delete old entries, write new entries ---
    if old, has := old_item.?; has {
        gsi_delete_entries(engine, table_name, old, &metadata)
    }
    gsi_write_entries(engine, table_name, existing_item, &metadata)

    new_item = existing_item
    return old_item, new_item, .None
}
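The staging order above (base put, old-GSI deletes, new-GSI puts) is safe because RocksDB applies a WriteBatch atomically, and within one batch a later write to the same key wins. A condensed skeleton of the commit protocol, using only calls that appear in this diff (the storage_key/encoded_item types are assumed to be []byte; error handling elided):

```odin
// Skeleton of the atomic write path; not new machinery, just the shape.
atomic_update_skeleton :: proc(engine: ^Storage_Engine, storage_key, encoded_item: []byte) {
    batch, _ := rocksdb.batch_create()
    defer rocksdb.batch_destroy(&batch)

    rocksdb.batch_put(&batch, storage_key, encoded_item) // base item
    // gsi_batch_delete_entries(&batch, ...)             // stale index rows
    // gsi_batch_write_entries(&batch, ...)              // fresh index rows

    // Single commit: readers never observe a half-maintained index.
    _ = rocksdb.batch_write(&engine.db, &batch)
}
```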

@@ -259,7 +259,7 @@ parse_index_name :: proc(request_body: []byte) -> Maybe(string) {
        return nil
    }

    return string(idx_str)
    return strings.clone(string(idx_str))
}

// ============================================================================

110	http.odin
@@ -6,6 +6,7 @@ import vmem "core:mem/virtual"
import "core:net"
import "core:strings"
import "core:strconv"
import "core:thread"

// HTTP Method enumeration
HTTP_Method :: enum {
@@ -100,9 +101,16 @@ response_set_body :: proc(resp: ^HTTP_Response, data: []byte) {
}

// Request handler function type
// Takes context pointer, request, and request-scoped allocator
Request_Handler :: #type proc(ctx: rawptr, request: ^HTTP_Request, request_alloc: mem.Allocator) -> HTTP_Response

// Parse error enum
Parse_Error :: enum {
    None,
    Connection_Closed,
    Invalid_Request,
    Body_Too_Large,
}

// Server configuration
Server_Config :: struct {
    max_body_size: int, // default 100MB
@@ -122,6 +130,13 @@ default_server_config :: proc() -> Server_Config {
    }
}

// Connection task data - passed to worker threads
Connection_Task_Data :: struct {
    server: ^Server,
    conn:   net.TCP_Socket,
    source: net.Endpoint,
}

// Server
Server :: struct {
    allocator: mem.Allocator,
@@ -168,9 +183,12 @@ server_start :: proc(server: ^Server) -> bool {
    server.socket = socket
    server.running = true

    fmt.printfln("HTTP server listening on %v", server.endpoint)
    fmt.printfln("HTTP server listening on %v (thread-per-connection)", server.endpoint)
    fmt.printfln("  Max body size: %d MB", server.config.max_body_size / (1024 * 1024))
    fmt.printfln("  Max headers: %d", server.config.max_headers)
    fmt.printfln("  Keep-alive: %v", server.config.enable_keep_alive)

    // Accept loop
    // Accept loop - spawn a thread for each connection
    for server.running {
        conn, source, accept_err := net.accept_tcp(socket)
        if accept_err != nil {
@@ -180,9 +198,23 @@ server_start :: proc(server: ^Server) -> bool {
            continue
        }

        // Handle connection in separate goroutine would go here
        // For now, handle synchronously (should spawn thread)
        handle_connection(server, conn, source)
        // Allocate connection data
        conn_data := new(Connection_Task_Data, server.allocator)
        conn_data.server = server
        conn_data.conn = conn
        conn_data.source = source

        // Spawn a new thread for this connection
        t := thread.create(connection_worker_thread)
        if t != nil {
            t.init_context = context
            t.data = conn_data
            thread.start(t)
        } else {
            // Failed to create thread, close connection
            net.close(conn)
            free(conn_data, server.allocator)
        }
    }

    return true
@@ -190,12 +222,33 @@ server_start :: proc(server: ^Server) -> bool {

server_stop :: proc(server: ^Server) {
    server.running = false

    // Close listening socket
    if sock, ok := server.socket.?; ok {
        net.close(sock)
        server.socket = nil
    }
}

// Worker thread procedure
connection_worker_thread :: proc(t: ^thread.Thread) {
    defer thread.destroy(t)

    conn_data := cast(^Connection_Task_Data)t.data
    defer free(conn_data, conn_data.server.allocator)

    handle_connection(conn_data.server, conn_data.conn, conn_data.source)
}
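Two notes on this worker. First, t.init_context = context in the accept loop is what lets the worker inherit the server's context (and thus its allocator); without it the thread starts from a default context. Second, thread-per-connection is simple but unbounded; if a cap is ever needed, a counting semaphore is the usual fix. A hypothetical sketch, none of which exists in this codebase:

```odin
import "core:sync"

// Hypothetical connection cap; MAX_CONNS and conn_slots are assumptions.
MAX_CONNS :: 1024
conn_slots: sync.Sema // post MAX_CONNS times at startup

// Accept loop side:
//     sync.sema_wait(&conn_slots) // block while all slots are busy
//     ... create and start the worker thread as above ...
//
// Worker side, after handle_connection returns:
//     sync.sema_post(&conn_slots) // release the slot
```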

// Create error response
make_error_response_simple :: proc(allocator: mem.Allocator, status: HTTP_Status, message: string) -> HTTP_Response {
    response := response_init(allocator)
    response_set_status(&response, status)
    response_add_header(&response, "Content-Type", "text/plain")
    response_set_body(&response, transmute([]byte)message)
    return response
}

// Handle a single connection
handle_connection :: proc(server: ^Server, conn: net.TCP_Socket, source: net.Endpoint) {
    defer net.close(conn)
@@ -214,13 +267,26 @@ handle_connection :: proc(server: ^Server, conn: net.TCP_Socket, source: net.End

        request_alloc := vmem.arena_allocator(&arena)

        // TODO: Double check if we want *all* downstream allocations to use the request arena?
        // Set request arena as context allocator for downstream allocations
        old := context.allocator
        context.allocator = request_alloc
        defer context.allocator = old

        request, parse_ok := parse_request(conn, request_alloc, server.config)
        if !parse_ok {
        request, parse_err := parse_request(conn, request_alloc, server.config)

        // Handle parse errors
        if parse_err != .None {
            #partial switch parse_err {
            case .Body_Too_Large:
                // Send 413 Payload Too Large
                response := make_error_response_simple(request_alloc, .Payload_Too_Large,
                    fmt.tprintf("Request body exceeds maximum size of %d bytes", server.config.max_body_size))
                send_response(conn, &response, request_alloc)
            case .Invalid_Request:
                // Send 400 Bad Request
                response := make_error_response_simple(request_alloc, .Bad_Request, "Invalid HTTP request")
                send_response(conn, &response, request_alloc)
            }
            break
        }

@@ -250,13 +316,13 @@ parse_request :: proc(
    conn: net.TCP_Socket,
    allocator: mem.Allocator,
    config: Server_Config,
) -> (HTTP_Request, bool) {
) -> (HTTP_Request, Parse_Error) {
    // Read request line and headers
    buffer := make([]byte, config.read_buffer_size, allocator)

    bytes_read, read_err := net.recv_tcp(conn, buffer)
    if read_err != nil || bytes_read == 0 {
        return {}, false
        return {}, .Connection_Closed
    }

    request_data := buffer[:bytes_read]
@@ -264,7 +330,7 @@ parse_request :: proc(
    // Find end of headers (\r\n\r\n)
    header_end_idx := strings.index(string(request_data), "\r\n\r\n")
    if header_end_idx < 0 {
        return {}, false
        return {}, .Invalid_Request
    }

    header_section := string(request_data[:header_end_idx])
@@ -273,13 +339,13 @@ parse_request :: proc(
    // Parse request line
    lines := strings.split_lines(header_section, allocator)
    if len(lines) == 0 {
        return {}, false
        return {}, .Invalid_Request
    }

    request_line := lines[0]
    parts := strings.split(request_line, " ", allocator)
    if len(parts) < 3 {
        return {}, false
        return {}, .Invalid_Request
    }

    method := method_from_string(parts[0])
@@ -305,6 +371,11 @@ parse_request :: proc(
            name = strings.clone(name, allocator),
            value = strings.clone(value, allocator),
        })

        // Check max headers limit
        if len(headers) > config.max_headers {
            return {}, .Invalid_Request
        }
    }

    // Read body if Content-Length present
@@ -314,7 +385,12 @@ parse_request :: proc(
    if cl, ok := content_length_header.?; ok {
        content_length := strconv.parse_int(cl) or_else 0

        if content_length > 0 && content_length <= config.max_body_size {
        // Check if body size exceeds limit
        if content_length > config.max_body_size {
            return {}, .Body_Too_Large
        }

        if content_length > 0 {
            // Check if we already have the body in buffer
            existing_body := request_data[body_start:]

@@ -336,7 +412,7 @@ parse_request :: proc(

            n, err := net.recv_tcp(conn, chunk)
            if err != nil || n == 0 {
                return {}, false
                return {}, .Connection_Closed
            }

            copy(body[body_written:], chunk[:n])
@@ -352,7 +428,7 @@ parse_request :: proc(
        path = path,
        headers = headers[:],
        body = body,
    }, true
    }, .None
}

// Helper to get header from slice

446	main.odin
@@ -13,6 +13,13 @@ Config :: struct {
    port: int,
    data_dir: string,
    verbose: bool,

    // HTTP server config
    max_body_size: int,
    max_headers: int,
    read_buffer_size: int,
    enable_keep_alive: bool,
    max_requests_per_connection: int,
}

main :: proc() {
@@ -36,8 +43,14 @@ main :: proc() {
    fmt.printfln("Storage engine initialized at %s", config.data_dir)
    fmt.printfln("Starting DynamoDB-compatible server on %s:%d", config.host, config.port)

    // Create HTTP server
    server_config := default_server_config()
    // Create HTTP server with config values
    server_config := Server_Config{
        max_body_size = config.max_body_size,
        max_headers = config.max_headers,
        read_buffer_size = config.read_buffer_size,
        enable_keep_alive = config.enable_keep_alive,
        max_requests_per_connection = config.max_requests_per_connection,
    }

    server, server_ok := server_init(
        context.allocator,
@@ -190,6 +203,15 @@ handle_create_table :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Req
        }
    }

    if gsi_val, found2 := root["GlobalSecondaryIndexes"]; found2 {
        if gsi_arr, ok2 := gsi_val.(json.Array); ok2 && len(gsi_arr) > 0 {
            if _, has := gsis.?; !has {
                make_error_response(response, .ValidationException, "Invalid GlobalSecondaryIndexes definition")
                return
            }
        }
    }

    // Create the table
    desc, create_err := dynamodb.create_table(engine, string(table_name), key_schema, attr_defs, gsis)
    if create_err != .None {
@@ -219,6 +241,7 @@ handle_delete_table :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Req
        make_error_response(response, .ValidationException, "Invalid request or missing TableName")
        return
    }
    defer delete(table_name)

    err := dynamodb.delete_table(engine, table_name)
    if err != .None {
@@ -241,6 +264,7 @@ handle_describe_table :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R
        make_error_response(response, .ValidationException, "Invalid request or missing TableName")
        return
    }
    defer delete(table_name)

    metadata, err := dynamodb.get_table_metadata(engine, table_name)
    if err != .None {
@@ -266,16 +290,22 @@ handle_describe_table :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R

    for ks, i in metadata.key_schema {
        if i > 0 do strings.write_string(&builder, ",")
        fmt.sbprintf(&builder, `{"AttributeName":"%s","KeyType":"%s"}`,
            ks.attribute_name, dynamodb.key_type_to_string(ks.key_type))
        strings.write_string(&builder, `{"AttributeName":"`)
        strings.write_string(&builder, ks.attribute_name)
        strings.write_string(&builder, `","KeyType":"`)
        strings.write_string(&builder, dynamodb.key_type_to_string(ks.key_type))
        strings.write_string(&builder, `"}`)
    }

    strings.write_string(&builder, `],"AttributeDefinitions":[`)

    for ad, i in metadata.attribute_definitions {
        if i > 0 do strings.write_string(&builder, ",")
        fmt.sbprintf(&builder, `{"AttributeName":"%s","AttributeType":"%s"}`,
            ad.attribute_name, dynamodb.scalar_type_to_string(ad.attribute_type))
        strings.write_string(&builder, `{"AttributeName":"`)
        strings.write_string(&builder, ad.attribute_name)
        strings.write_string(&builder, `","AttributeType":"`)
        strings.write_string(&builder, dynamodb.scalar_type_to_string(ad.attribute_type))
        strings.write_string(&builder, `"}`)
    }

    strings.write_string(&builder, `]`)
@@ -290,8 +320,11 @@ handle_describe_table :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R
        strings.write_string(&builder, `","KeySchema":[`)
        for ks, ki in gsi.key_schema {
            if ki > 0 do strings.write_string(&builder, ",")
            fmt.sbprintf(&builder, `{"AttributeName":"%s","KeyType":"%s"}`,
                ks.attribute_name, dynamodb.key_type_to_string(ks.key_type))
            strings.write_string(&builder, `{"AttributeName":"`)
            strings.write_string(&builder, ks.attribute_name)
            strings.write_string(&builder, `","KeyType":"`)
            strings.write_string(&builder, dynamodb.key_type_to_string(ks.key_type))
            strings.write_string(&builder, `"}`)
        }
        strings.write_string(&builder, `],"Projection":{"ProjectionType":"`)
        strings.write_string(&builder, projection_type_to_string(gsi.projection.projection_type))
@@ -340,6 +373,7 @@ handle_put_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request
        make_error_response(response, .ValidationException, "Invalid request or missing TableName")
        return
    }
    defer delete(table_name)

    item, item_ok := dynamodb.parse_item_from_request(request.body)
    if !item_ok {
@@ -349,8 +383,9 @@ handle_put_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request
    defer dynamodb.item_destroy(&item)

    // ---- ConditionExpression evaluation ----
    _, has_condition := dynamodb.parse_condition_expression_string(request.body)
    cond_str, has_condition := dynamodb.parse_condition_expression_string(request.body)
    if has_condition {
        defer delete(cond_str)
        // Parse shared expression attributes
        attr_names := dynamodb.parse_expression_attribute_names(request.body)
        defer {
@@ -386,12 +421,21 @@ handle_put_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request
            // If no explicit Key field, extract key from Item
            // (PutItem doesn't have a Key field — the key is in the Item itself)
            existing_maybe, get_err := dynamodb.get_item(engine, table_name, item)
            if get_err != .None && get_err != .Table_Not_Found {
                // Table not found is handled by put_item below
                if get_err == .Missing_Key_Attribute || get_err == .Invalid_Key {
            #partial switch get_err {
            case .None:
                // Item found or not found — both are fine, condition evaluates against
                // whatever was returned (nil item = item doesn't exist).
            case .Table_Not_Found:
                // Table will be caught and reported properly by put_item below.
            case .Missing_Key_Attribute, .Invalid_Key:
                    handle_storage_error(response, get_err)
                    return
                }
            case .RocksDB_Error, .Serialization_Error, .Internal_Error:
                make_error_response(response, .InternalServerError, "Failed to fetch existing item")
                return
            case .Validation_Error, .Item_Not_Found:
                // Item_Not_Found shouldn't reach here (get_item returns nil, .None),
                // but treat defensively.
            }
            existing_item = existing_maybe
        } else {
@@ -452,6 +496,7 @@ handle_get_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request
        make_error_response(response, .ValidationException, "Invalid request or missing TableName")
        return
    }
    defer delete(table_name)

    key, key_ok := dynamodb.parse_key_from_request(request.body)
    if !key_ok {
@@ -468,9 +513,17 @@ handle_get_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request

    if item_val, has_item := item.?; has_item {
        defer dynamodb.item_destroy(&item_val)
        item_json := dynamodb.serialize_item(item_val)
        resp := fmt.aprintf(`{"Item":%s}`, item_json)
        response_set_body(response, transmute([]byte)resp)

        // Build response directly to avoid intermediate string allocations
        builder := strings.builder_make(context.allocator)
        defer strings.builder_destroy(&builder)

        strings.write_string(&builder, `{"Item":`)
        dynamodb.serialize_item_to_builder(&builder, item_val)
        strings.write_string(&builder, `}`)

        resp_body := strings.clone(strings.to_string(builder))
        response_set_body(response, transmute([]byte)resp_body)
    } else {
        response_set_body(response, transmute([]byte)string("{}"))
    }
@@ -482,6 +535,7 @@ handle_delete_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Requ
        make_error_response(response, .ValidationException, "Invalid request or missing TableName")
        return
    }
    defer delete(table_name)

    key, key_ok := dynamodb.parse_key_from_request(request.body)
    if !key_ok {
@@ -521,11 +575,19 @@ handle_delete_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Requ

        // Fetch existing item
        existing_item, get_err := dynamodb.get_item(engine, table_name, key)
        if get_err != .None && get_err != .Table_Not_Found {
            if get_err == .Missing_Key_Attribute || get_err == .Invalid_Key {
        #partial switch get_err {
        case .None:
            // Item found or not found — condition evaluates against whatever was returned.
        case .Table_Not_Found:
            // Table will be caught and reported properly by delete_item below.
        case .Missing_Key_Attribute, .Invalid_Key:
                handle_storage_error(response, get_err)
                return
            }
        case .RocksDB_Error, .Serialization_Error, .Internal_Error:
            make_error_response(response, .InternalServerError, "Failed to fetch existing item")
            return
        case .Validation_Error, .Item_Not_Found:
            // Defensive — shouldn't reach here normally.
        }
        defer {
            if ex, has_ex := existing_item.?; has_ex {
@@ -571,6 +633,7 @@ handle_update_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Requ
        make_error_response(response, .ValidationException, "Invalid request or missing TableName")
        return
    }
    defer delete(table_name)

    // Parse Key
    key_item, key_ok := dynamodb.parse_key_from_request(request.body)
@@ -586,6 +649,7 @@ handle_update_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Requ
        make_error_response(response, .ValidationException, "Missing or invalid UpdateExpression")
        return
    }
    defer delete(update_expr)

    // Parse ExpressionAttributeNames and ExpressionAttributeValues
    attr_names := dynamodb.parse_expression_attribute_names(request.body)
@@ -661,6 +725,7 @@ handle_update_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Requ

    // Parse ReturnValues
    return_values := dynamodb.parse_return_values(request.body)
    defer delete(return_values)

    // Execute update
    old_item, new_item, err := dynamodb.update_item(engine, table_name, key_item, &plan)
@@ -680,51 +745,59 @@ handle_update_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Requ
    }

    // Build response based on ReturnValues
    builder := strings.builder_make(context.allocator)
    defer strings.builder_destroy(&builder)

    switch return_values {
    case "ALL_NEW":
        if new_val, has := new_item.?; has {
            item_json := dynamodb.serialize_item(new_val)
            resp := fmt.aprintf(`{"Attributes":%s}`, item_json)
            response_set_body(response, transmute([]byte)resp)
            strings.write_string(&builder, `{"Attributes":`)
            dynamodb.serialize_item_to_builder(&builder, new_val)
            strings.write_string(&builder, `}`)
        } else {
            response_set_body(response, transmute([]byte)string("{}"))
            strings.write_string(&builder, `{}`)
        }

    case "ALL_OLD":
        if old, has := old_item.?; has {
            item_json := dynamodb.serialize_item(old)
            resp := fmt.aprintf(`{"Attributes":%s}`, item_json)
            response_set_body(response, transmute([]byte)resp)
            strings.write_string(&builder, `{"Attributes":`)
            dynamodb.serialize_item_to_builder(&builder, old)
            strings.write_string(&builder, `}`)
        } else {
            response_set_body(response, transmute([]byte)string("{}"))
            strings.write_string(&builder, `{}`)
        }

    case "UPDATED_NEW":
        if new_val, has := new_item.?; has {
            filtered := filter_updated_attributes(new_val, &plan)
            defer dynamodb.item_destroy(&filtered)
            item_json := dynamodb.serialize_item(filtered)
            resp := fmt.aprintf(`{"Attributes":%s}`, item_json)
            response_set_body(response, transmute([]byte)resp)

            strings.write_string(&builder, `{"Attributes":`)
            dynamodb.serialize_item_to_builder(&builder, filtered)
            strings.write_string(&builder, `}`)
        } else {
            response_set_body(response, transmute([]byte)string("{}"))
            strings.write_string(&builder, `{}`)
        }

    case "UPDATED_OLD":
        if old, has := old_item.?; has {
            filtered := filter_updated_attributes(old, &plan)
            defer dynamodb.item_destroy(&filtered)
            item_json := dynamodb.serialize_item(filtered)
            resp := fmt.aprintf(`{"Attributes":%s}`, item_json)
            response_set_body(response, transmute([]byte)resp)

            strings.write_string(&builder, `{"Attributes":`)
            dynamodb.serialize_item_to_builder(&builder, filtered)
            strings.write_string(&builder, `}`)
        } else {
            response_set_body(response, transmute([]byte)string("{}"))
            strings.write_string(&builder, `{}`)
        }

    case:
        // "NONE" or default
        response_set_body(response, transmute([]byte)string("{}"))
        strings.write_string(&builder, `{}`)
    }

    resp_body := strings.clone(strings.to_string(builder))
    response_set_body(response, transmute([]byte)resp_body)
}

handle_batch_write_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, response: ^HTTP_Response) {
@@ -872,7 +945,7 @@ handle_batch_write_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP
    }

    append(&table_requests, dynamodb.Batch_Write_Table_Request{
        table_name = string(table_name),
        table_name = strings.clone(string(table_name)),
        requests = requests[:],
    })
}
@@ -917,9 +990,13 @@ handle_batch_write_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP
    item_json := dynamodb.serialize_item(req.item)
    switch req.type {
    case .Put:
        fmt.sbprintf(&builder, `{"PutRequest":{"Item":%s}}`, item_json)
        strings.write_string(&builder, `{"PutRequest":{"Item":`)
        strings.write_string(&builder, item_json)
        strings.write_string(&builder, "}}")
    case .Delete:
        fmt.sbprintf(&builder, `{"DeleteRequest":{"Key":%s}}`, item_json)
        strings.write_string(&builder, `{"DeleteRequest":{"Key":`)
        strings.write_string(&builder, item_json)
        strings.write_string(&builder, "}}")
    }
}

@@ -1010,7 +1087,7 @@ handle_batch_get_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R
    }

    append(&table_requests, dynamodb.Batch_Get_Table_Request{
        table_name = string(table_name),
        table_name = strings.clone(string(table_name)),
        keys = keys[:],
    })
}
@@ -1037,7 +1114,9 @@ handle_batch_get_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R
    defer dynamodb.batch_get_result_destroy(&result)

    // Build response
    builder := strings.builder_make()
    builder := strings.builder_make(context.allocator)
    defer strings.builder_destroy(&builder)

    strings.write_string(&builder, `{"Responses":{`)

    for table_result, ti in result.responses {
@@ -1050,8 +1129,7 @@ handle_batch_get_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R
        if ii > 0 {
            strings.write_string(&builder, ",")
        }
        item_json := dynamodb.serialize_item(item)
        strings.write_string(&builder, item_json)
        dynamodb.serialize_item_to_builder(&builder, item)
    }

    strings.write_string(&builder, "]")
@@ -1063,14 +1141,15 @@ handle_batch_get_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R
        if ti > 0 {
            strings.write_string(&builder, ",")
        }
        fmt.sbprintf(&builder, `"%s":{"Keys":[`, table_req.table_name)
        strings.write_string(&builder, `"`)
        strings.write_string(&builder, table_req.table_name)
        strings.write_string(&builder, `":{"Keys":[`)

        for key, ki in table_req.keys {
            if ki > 0 {
                strings.write_string(&builder, ",")
            }
            key_json := dynamodb.serialize_item(key)
            strings.write_string(&builder, key_json)
            dynamodb.serialize_item_to_builder(&builder, key)
        }

        strings.write_string(&builder, "]}")
@@ -1078,7 +1157,8 @@ handle_batch_get_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R

    strings.write_string(&builder, "}}")

    resp_body := strings.to_string(builder)
    // Clone the string so it outlives the builder
    resp_body := strings.clone(strings.to_string(builder))
    response_set_body(response, transmute([]byte)resp_body)
}
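The recurring builder change in these handlers (fmt.sbprintf replaced by explicit write_string calls) keeps user-supplied names out of format strings and skips format parsing on the hot path. The shape is always the same; a sketch of the pattern, where the no-escaping assumption matches the handlers above, which write table and attribute names through verbatim:

```odin
// The write_string pattern used throughout: literal JSON fragments,
// then the dynamic part, with no format-string machinery in between.
write_json_member_sketch :: proc(builder: ^strings.Builder, key: string, raw_value_json: string) {
    strings.write_string(builder, `"`)
    strings.write_string(builder, key) // assumed to need no JSON escaping
    strings.write_string(builder, `":`)
    strings.write_string(builder, raw_value_json)
}
```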
|
||||
|
||||
@@ -1093,9 +1173,15 @@ handle_query :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, r
|
||||
make_error_response(response, .ValidationException, "Invalid request or missing TableName")
|
||||
return
|
||||
}
|
||||
defer delete(table_name)
|
||||
|
||||
// Grab index name from request body
|
||||
index_name := parse_index_name(request.body)
|
||||
defer {
|
||||
if idx, has := index_name.?; has {
|
||||
delete(idx)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch table metadata early for ExclusiveStartKey parsing
|
||||
metadata, meta_err := dynamodb.get_table_metadata(engine, table_name)
|
||||
@@ -1133,11 +1219,15 @@ handle_query :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, r
|
||||
}
|
||||
|
||||
// Parse ExclusiveStartKey
|
||||
exclusive_start_key, esk_ok := dynamodb.parse_exclusive_start_key(
|
||||
exclusive_start_key, esk_ok, esk_body_err := dynamodb.parse_exclusive_start_key(
|
||||
request.body, table_name, metadata.key_schema,
|
||||
)
|
||||
if !esk_ok {
|
||||
make_error_response(response, .ValidationException, "Invalid ExclusiveStartKey")
|
||||
if esk_body_err {
|
||||
make_error_response(response, .SerializationException, "Request body is not valid JSON")
|
||||
} else {
|
||||
make_error_response(response, .ValidationException, "Invalid ExclusiveStartKey")
|
||||
}
|
||||
return
|
||||
}
|
||||
defer {
|
||||
@@ -1165,7 +1255,11 @@ handle_query :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, r
|
||||
}
|
||||
}
|
||||
|
||||
attr_values, _ := dynamodb.parse_expression_attribute_values(request.body)
|
||||
attr_values, vals_ok := dynamodb.parse_expression_attribute_values(request.body)
|
||||
if !vals_ok {
|
||||
make_error_response(response, .ValidationException, "Invalid ExpressionAttributeValues")
|
||||
return
|
||||
}
|
||||
defer {
|
||||
for k, v in attr_values {
|
||||
delete(k)
|
||||
@@ -1177,15 +1271,30 @@ handle_query :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, r
|
||||
|
||||
// ---- GSI query path ----
|
||||
if idx_name, has_idx := index_name.?; has_idx {
|
||||
_, gsi_found := dynamodb.find_gsi(&metadata, idx_name)
|
||||
gsi, gsi_found := dynamodb.find_gsi(&metadata, idx_name)
|
||||
if !gsi_found {
|
||||
make_error_response(response, .ValidationException,
|
||||
fmt.tprintf("The table does not have the specified index: %s", idx_name))
|
||||
return
|
||||
}
|
||||
|
||||
esk_gsi, esk_gsi_ok, esk_gsi_body_err := dynamodb.parse_exclusive_start_key_gsi(
|
||||
request.body, table_name, &metadata, gsi,
|
||||
)
|
||||
if !esk_gsi_ok {
|
||||
if esk_gsi_body_err {
|
||||
make_error_response(response, .SerializationException, "Request body is not valid JSON")
|
||||
} else {
|
||||
make_error_response(response, .ValidationException, "Invalid ExclusiveStartKey")
|
||||
}
|
||||
return
|
||||
}
|
||||
defer {
|
||||
if k, ok_gsi := esk_gsi.?; ok_gsi { delete(k) }
|
||||
}
|
||||
|
||||
result, err := dynamodb.gsi_query(engine, table_name, idx_name,
|
||||
pk_owned, exclusive_start_key, limit, sk_condition)
|
||||
pk_owned, esk_gsi, limit, sk_condition)
|
||||
if err != .None {
|
||||
handle_storage_error(response, err)
|
||||
return
|
||||
@@ -1193,7 +1302,11 @@ handle_query :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, r
     defer dynamodb.query_result_destroy(&result)

     // Apply FilterExpression
-    filtered_items := apply_filter_to_items(request.body, result.items, attr_names, attr_values)
+    filtered_items, filter_ok := apply_filter_to_items(request.body, result.items, attr_names, attr_values)
+    if !filter_ok {
+        make_error_response(response, .ValidationException, "Invalid FilterExpression")
+        return
+    }
     scanned_count := len(result.items)

     // Apply ProjectionExpression
@@ -1220,7 +1333,7 @@ handle_query :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, r
     }

     write_items_response_with_pagination_ex(
-        response, final_items, result.last_evaluated_key, &metadata, scanned_count,
+        response, final_items, result.last_evaluated_key, &metadata, scanned_count, gsi,
     )

     if has_proj && len(projection) > 0 {
@@ -1241,7 +1354,11 @@ handle_query :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, r
     defer dynamodb.query_result_destroy(&result)

     // ---- Apply FilterExpression (post-query filter) ----
-    filtered_items := apply_filter_to_items(request.body, result.items, attr_names, attr_values)
+    filtered_items, filter_ok := apply_filter_to_items(request.body, result.items, attr_names, attr_values)
+    if !filter_ok {
+        make_error_response(response, .ValidationException, "Invalid FilterExpression")
+        return
+    }
     scanned_count := len(result.items)

     // ---- Apply ProjectionExpression ----
@@ -1288,9 +1405,15 @@ handle_scan :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, re
         make_error_response(response, .ValidationException, "Invalid request or missing TableName")
         return
     }
     defer delete(table_name)

+    // Grab index name from request body
+    index_name := parse_index_name(request.body)
+    defer {
+        if idx, has := index_name.?; has {
+            delete(idx)
+        }
+    }

     metadata, meta_err := dynamodb.get_table_metadata(engine, table_name)
     if meta_err != .None {
@@ -1304,11 +1427,15 @@ handle_scan :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, re
         limit = 100
     }

-    exclusive_start_key, esk_ok := dynamodb.parse_exclusive_start_key(
+    exclusive_start_key, esk_ok, esk_body_err := dynamodb.parse_exclusive_start_key(
         request.body, table_name, metadata.key_schema,
     )
     if !esk_ok {
-        make_error_response(response, .ValidationException, "Invalid ExclusiveStartKey")
+        if esk_body_err {
+            make_error_response(response, .SerializationException, "Request body is not valid JSON")
+        } else {
+            make_error_response(response, .ValidationException, "Invalid ExclusiveStartKey")
+        }
         return
     }
     defer {
@@ -1330,7 +1457,11 @@ handle_scan :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, re
         }
     }

-    attr_values, _ := dynamodb.parse_expression_attribute_values(request.body)
+    attr_values, vals_ok := dynamodb.parse_expression_attribute_values(request.body)
+    if !vals_ok {
+        make_error_response(response, .ValidationException, "Invalid ExpressionAttributeValues")
+        return
+    }
     defer {
         for k, v in attr_values {
             delete(k)
@@ -1342,23 +1473,44 @@ handle_scan :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, re

     // ---- GSI scan path ----
     if idx_name, has_idx := index_name.?; has_idx {
-        _, gsi_found := dynamodb.find_gsi(&metadata, idx_name)
+        gsi, gsi_found := dynamodb.find_gsi(&metadata, idx_name)
         if !gsi_found {
             make_error_response(response, .ValidationException,
                 fmt.tprintf("The table does not have the specified index: %s", idx_name))
             return
         }

-        result, err := dynamodb.gsi_scan(engine, table_name, idx_name, exclusive_start_key, limit)
+        esk_gsi, esk_gsi_ok, esk_gsi_body_err := dynamodb.parse_exclusive_start_key_gsi(
+            request.body, table_name, &metadata, gsi,
+        )
+        if !esk_gsi_ok {
+            if esk_gsi_body_err {
+                make_error_response(response, .SerializationException, "Request body is not valid JSON")
+            } else {
+                make_error_response(response, .ValidationException, "Invalid ExclusiveStartKey")
+            }
+            return
+        }
+        defer {
+            if k, ok_gsi := esk_gsi.?; ok_gsi { delete(k) }
+        }
+
+        result, err := dynamodb.gsi_scan(engine, table_name, idx_name, esk_gsi, limit)
         if err != .None {
             handle_storage_error(response, err)
             return
         }
         defer dynamodb.scan_result_destroy(&result)

-        filtered_items := apply_filter_to_items(request.body, result.items, attr_names, attr_values)
+        // Apply FilterExpression
+        filtered_items, filter_ok := apply_filter_to_items(request.body, result.items, attr_names, attr_values)
+        if !filter_ok {
+            make_error_response(response, .ValidationException, "Invalid FilterExpression")
+            return
+        }
        scanned_count := len(result.items)

         // Apply ProjectionExpression
         projection, has_proj := dynamodb.parse_projection_expression(request.body, attr_names)
         defer { // This block just frees the cloned string and projection slice
             if has_proj && len(projection) > 0 {
@@ -1382,7 +1534,7 @@ handle_scan :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, re
         }

         write_items_response_with_pagination_ex(
-            response, final_items, result.last_evaluated_key, &metadata, scanned_count,
+            response, final_items, result.last_evaluated_key, &metadata, scanned_count, gsi,
         )

         if has_proj && len(projection) > 0 {
@@ -1402,8 +1554,12 @@ handle_scan :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request, re
     }
     defer dynamodb.scan_result_destroy(&result)

-    // ---- Apply FilterExpression ----
-    filtered_items := apply_filter_to_items(request.body, result.items, attr_names, attr_values)
+    // ---- Apply FilterExpression (post-scan filter) ----
+    filtered_items, filter_ok := apply_filter_to_items(request.body, result.items, attr_names, attr_values)
+    if !filter_ok {
+        make_error_response(response, .ValidationException, "Invalid FilterExpression")
+        return
+    }
     scanned_count := len(result.items)

     // ---- Apply ProjectionExpression ----
@@ -1451,15 +1607,16 @@ apply_filter_to_items :: proc(
     items: []dynamodb.Item,
     attr_names: Maybe(map[string]string),
     attr_values: map[string]dynamodb.Attribute_Value,
-) -> []dynamodb.Item {
+) -> (filtered_items: []dynamodb.Item, ok: bool) {
     filter_expr, has_filter := dynamodb.parse_filter_expression_string(request_body)
     if !has_filter {
-        return items // no filter, return as-is
+        return items, true
     }
     defer delete(filter_expr)

     filter_node, filter_ok := dynamodb.parse_filter_expression(filter_expr, attr_names, attr_values)
     if !filter_ok || filter_node == nil {
-        return items // failed to parse, return unfiltered
+        return nil, false
     }
     defer {
         dynamodb.filter_node_destroy(filter_node)
@@ -1473,7 +1630,7 @@ apply_filter_to_items :: proc(
         }
     }

-    return filtered[:]
+    return filtered[:], true
 }

 // ============================================================================
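With the new `(items, ok)` signature, a filter that fails to parse is reported to the client instead of silently returning unfiltered rows, which previously could return items the filter was meant to exclude. A hedged sketch of the caller contract (the `Item` type here is a stand-in for `dynamodb.Item`):

```odin
package sketch

Item :: distinct map[string]string // stand-in for dynamodb.Item

// Sketch: callers must treat ok == false as a client error
// (ValidationException on the wire), never as "no filtering happened".
select_items :: proc(filtered: []Item, ok: bool) -> (out: []Item, http_status: int) {
    if !ok do return nil, 400
    return filtered, 200
}
```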
@@ -1490,14 +1647,16 @@ write_items_response_with_pagination_ex :: proc(
     last_evaluated_key_binary: Maybe([]byte),
     metadata: ^dynamodb.Table_Metadata,
     scanned_count: int,
+    gsi: ^dynamodb.Global_Secondary_Index = nil, // ← NEW parameter
 ) {
-    builder := strings.builder_make()
+    builder := strings.builder_make(context.allocator)
     defer strings.builder_destroy(&builder)

     strings.write_string(&builder, `{"Items":[`)

     for item, i in items {
         if i > 0 do strings.write_string(&builder, ",")
-        item_json := dynamodb.serialize_item(item)
-        strings.write_string(&builder, item_json)
+        dynamodb.serialize_item_to_builder(&builder, item)
     }

     strings.write_string(&builder, `],"Count":`)
@@ -1506,7 +1665,16 @@ write_items_response_with_pagination_ex :: proc(
     fmt.sbprintf(&builder, "%d", scanned_count)

     if binary_key, has_last := last_evaluated_key_binary.?; has_last {
-        lek_json, lek_ok := dynamodb.serialize_last_evaluated_key(binary_key, metadata)
+        lek_json: string
+        lek_ok: bool
+
+        // Use GSI serializer if we have a GSI, otherwise use base table serializer
+        if gsi != nil {
+            lek_json, lek_ok = dynamodb.serialize_last_evaluated_key_gsi(binary_key, metadata, gsi)
+        } else {
+            lek_json, lek_ok = dynamodb.serialize_last_evaluated_key(binary_key, metadata)
+        }
+
         if lek_ok {
             strings.write_string(&builder, `,"LastEvaluatedKey":`)
             strings.write_string(&builder, lek_json)
@@ -1515,7 +1683,7 @@ write_items_response_with_pagination_ex :: proc(

     strings.write_string(&builder, "}")

-    resp_body := strings.to_string(builder)
+    resp_body := strings.clone(strings.to_string(builder))
     response_set_body(response, transmute([]byte)resp_body)
 }
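The GSI branch matters because a pagination token from an index query must decode against the index's key schema on the follow-up request; serialized with the base-table schema, the next Query would reject it as an invalid `ExclusiveStartKey`. A small sketch of the dispatch decision (types here are stand-ins, not the dynamodb package's):

```odin
package sketch

Key_Schema :: struct {
    hash_attr:  string,
    range_attr: Maybe(string),
}

// Sketch: pick the key schema that the *next* request will be validated
// against. Index queries paginate in index-key space, not table-key space.
schema_for_lek :: proc(table: Key_Schema, gsi: ^Key_Schema) -> Key_Schema {
    if gsi != nil do return gsi^
    return table
}
```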
@@ -1535,13 +1703,15 @@ write_items_response_with_pagination :: proc(
     last_evaluated_key_binary: Maybe([]byte),
     metadata: ^dynamodb.Table_Metadata,
 ) {
-    builder := strings.builder_make()
+    builder := strings.builder_make(context.allocator)
     defer strings.builder_destroy(&builder)

     strings.write_string(&builder, `{"Items":[`)

+    // Use serialize_item_to_builder directly so we always get the correct response payload
     for item, i in items {
         if i > 0 do strings.write_string(&builder, ",")
-        item_json := dynamodb.serialize_item(item)
-        strings.write_string(&builder, item_json)
+        dynamodb.serialize_item_to_builder(&builder, item)
     }

     strings.write_string(&builder, `],"Count":`)
@@ -1549,20 +1719,18 @@ write_items_response_with_pagination :: proc(
     strings.write_string(&builder, `,"ScannedCount":`)
     fmt.sbprintf(&builder, "%d", len(items))

+    // Emit LastEvaluatedKey if the storage layer produced one
     if binary_key, has_last := last_evaluated_key_binary.?; has_last {
         lek_json, lek_ok := dynamodb.serialize_last_evaluated_key(binary_key, metadata)
         if lek_ok {
             strings.write_string(&builder, `,"LastEvaluatedKey":`)
             strings.write_string(&builder, lek_json)
         }
+        // If decoding fails we still return the items, just without a pagination
+        // token. The client will assume the scan/query is complete.
     }

     strings.write_string(&builder, "}")

-    resp_body := strings.to_string(builder)
+    // Clone the string before passing it to response_set_body: the builder's
+    // backing buffer is freed by the deferred builder_destroy above.
+    resp_body := strings.clone(strings.to_string(builder))
     response_set_body(response, transmute([]byte)resp_body)
 }

@@ -1585,6 +1753,8 @@ handle_storage_error :: proc(response: ^HTTP_Response, err: dynamodb.Storage_Err
         make_error_response(response, .ValidationException, "One or more required key attributes are missing")
     case .Invalid_Key:
         make_error_response(response, .ValidationException, "Invalid key: type mismatch or malformed key value")
+    case .Validation_Error:
+        make_error_response(response, .ValidationException, "Invalid request: type mismatch or incompatible operand")
     case .Serialization_Error:
         make_error_response(response, .InternalServerError, "Internal serialization error")
     case .RocksDB_Error:
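`handle_storage_error` is the single place where storage-layer failures become wire errors; the two new cases keep client mistakes (HTTP 400 `ValidationException`) distinct from server faults (HTTP 500 `InternalServerError`). A sketch of the mapping, with the `RocksDB_Error` branch assumed to map to a server error since its body is truncated in this hunk:

```odin
package sketch

Storage_Error :: enum { None, Invalid_Key, Validation_Error, Serialization_Error, RocksDB_Error }

// Sketch mirroring handle_storage_error's switch above.
wire_status :: proc(err: Storage_Error) -> int {
    #partial switch err {
    case .Invalid_Key, .Validation_Error:
        return 400 // ValidationException: the client sent a bad request
    case .Serialization_Error, .RocksDB_Error:
        return 500 // InternalServerError (RocksDB mapping assumed)
    }
    return 200
}
```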
@@ -1891,14 +2061,20 @@ make_error_response :: proc(response: ^HTTP_Response, err_type: dynamodb.DynamoD

 parse_config :: proc() -> Config {
     config := Config{
-        host = "0.0.0.0",
-        port = 8002,
-        data_dir = "./data",
-        verbose = false,
+        // Defaults
+        host = "0.0.0.0",
+        port = 8002,
+        data_dir = "./data",
+        verbose = false,
+        max_body_size = 100 * 1024 * 1024, // 100 MB
+        max_headers = 100,
+        read_buffer_size = 8 * 1024, // 8 KB
+        enable_keep_alive = true,
+        max_requests_per_connection = 1000,
     }

-    // Environment variables
-    if port_str, env_ok := os.lookup_env("JORMUN_PORT"); env_ok {
+    // Environment variables (lower priority)
+    if port_str, ok := os.lookup_env("JORMUN_PORT"); ok {
         if port, parse_ok := strconv.parse_int(port_str); parse_ok {
             config.port = port
         }
@@ -1916,11 +2092,107 @@ parse_config :: proc() -> Config {
         config.verbose = verbose == "1"
     }

-    // TODO: Parse command line arguments
+    if max_body_str, ok := os.lookup_env("JORMUN_MAX_BODY_SIZE"); ok {
+        if max_body, parse_ok := strconv.parse_int(max_body_str); parse_ok {
+            config.max_body_size = max_body
+        }
+    }
+
+    // Command line arguments (highest priority)
+    args := os.args[1:] // Skip program name
+
+    for i := 0; i < len(args); i += 1 {
+        arg := args[i]
+
+        // Helper to get next arg value
+        get_value :: proc(args: []string, i: ^int) -> (string, bool) {
+            if i^ + 1 < len(args) {
+                i^ += 1
+                return args[i^], true
+            }
+            return "", false
+        }
+
+        switch arg {
+        case "--host", "-h":
+            if value, ok := get_value(args, &i); ok {
+                config.host = value
+            }
+        case "--port", "-p":
+            if value, ok := get_value(args, &i); ok {
+                if port, parse_ok := strconv.parse_int(value); parse_ok {
+                    config.port = port
+                }
+            }
+        case "--data-dir", "-d":
+            if value, ok := get_value(args, &i); ok {
+                config.data_dir = value
+            }
+        case "--verbose", "-v":
+            config.verbose = true
+        case "--max-body-size":
+            if value, ok := get_value(args, &i); ok {
+                if size, parse_ok := strconv.parse_int(value); parse_ok {
+                    config.max_body_size = size
+                }
+            }
+        case "--max-headers":
+            if value, ok := get_value(args, &i); ok {
+                if count, parse_ok := strconv.parse_int(value); parse_ok {
+                    config.max_headers = count
+                }
+            }
+        case "--no-keep-alive":
+            config.enable_keep_alive = false
+        case "--help":
+            print_help()
+            os.exit(0)
+        }
+    }

     return config
 }

+print_help :: proc() {
+    help_text := `
+JormunDB - DynamoDB-Compatible Database Server
+
+USAGE:
+    jormundb [OPTIONS]
+
+OPTIONS:
+    --host, -h <HOST>          Server bind address (default: 0.0.0.0)
+    --port, -p <PORT>          Server port (default: 8002)
+    --data-dir, -d <DIR>       Data directory path (default: ./data)
+    --verbose, -v              Enable verbose logging
+    --max-body-size <BYTES>    Maximum request body size in bytes (default: 104857600 = 100MB)
+    --max-headers <COUNT>      Maximum number of headers per request (default: 100)
+    --no-keep-alive            Disable HTTP keep-alive connections
+    --help                     Show this help message
+
+ENVIRONMENT VARIABLES:
+    JORMUN_HOST           Same as --host
+    JORMUN_PORT           Same as --port
+    JORMUN_DATA_DIR       Same as --data-dir
+    JORMUN_VERBOSE        Set to "1" to enable verbose mode
+    JORMUN_MAX_BODY_SIZE  Same as --max-body-size
+
+EXAMPLES:
+    # Start with default settings
+    jormundb
+
+    # Custom port and data directory
+    jormundb --port 9000 --data-dir /var/lib/jormundb
+
+    # Limit body size to 10MB
+    jormundb --max-body-size 10485760
+
+    # Use environment variables
+    JORMUN_PORT=9000 JORMUN_HOST=127.0.0.1 jormundb
+`
+    fmt.println(help_text)
+}

 print_banner :: proc(config: Config) {
     banner := `
 ╔═══════════════════════════════════════════════╗
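parse_config now layers three sources in increasing priority: compiled-in defaults, then `JORMUN_*` environment variables, then command-line flags, so the last writer wins. A minimal sketch of that layering for a single field (only the port is shown; the other fields follow the same pattern):

```odin
package sketch

import "core:os"
import "core:strconv"

// Sketch: defaults < environment < CLI, as in parse_config above.
effective_port :: proc(cli_port: Maybe(int)) -> int {
    port := 8002 // compiled-in default
    if s, ok := os.lookup_env("JORMUN_PORT"); ok {
        if p, p_ok := strconv.parse_int(s); p_ok do port = p
    }
    if p, has := cli_port.?; has do port = p // CLI flag wins
    return port
}
```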
open_api_doc.yaml (new file, 884 lines)
@@ -0,0 +1,884 @@
openapi: 3.0.3
info:
  title: JormunDB DynamoDB Wire API
  version: 0.1.0
  description: |
    DynamoDB-compatible JSON-over-HTTP API implemented by JormunDB.
    Requests are POSTed to a single endpoint (/) and routed by the required `X-Amz-Target` header.
servers:
  - url: http://localhost:8002

paths:
  /:
    post:
      summary: DynamoDB JSON API endpoint
      description: |
        Send DynamoDB JSON protocol requests to this endpoint and set `X-Amz-Target` to the operation name,
        e.g. `DynamoDB_20120810.GetItem`. The request and response media type is typically
        `application/x-amz-json-1.0`.
      parameters:
        - $ref: '#/components/parameters/XAmzTarget'
        - $ref: '#/components/parameters/XAmzDate'
        - $ref: '#/components/parameters/Authorization'
        - $ref: '#/components/parameters/XAmzSecurityToken'
        - $ref: '#/components/parameters/XAmzContentSha256'
      requestBody:
        required: true
        content:
          application/x-amz-json-1.0:
            schema:
              oneOf:
                - $ref: '#/components/schemas/CreateTableRequest'
                - $ref: '#/components/schemas/DeleteTableRequest'
                - $ref: '#/components/schemas/DescribeTableRequest'
                - $ref: '#/components/schemas/ListTablesRequest'
                - $ref: '#/components/schemas/PutItemRequest'
                - $ref: '#/components/schemas/GetItemRequest'
                - $ref: '#/components/schemas/DeleteItemRequest'
                - $ref: '#/components/schemas/UpdateItemRequest'
                - $ref: '#/components/schemas/QueryRequest'
                - $ref: '#/components/schemas/ScanRequest'
                - $ref: '#/components/schemas/BatchWriteItemRequest'
                - $ref: '#/components/schemas/BatchGetItemRequest'
                - $ref: '#/components/schemas/TransactWriteItemsRequest'
                - $ref: '#/components/schemas/TransactGetItemsRequest'
            examples:
              CreateTable:
                summary: Create a table with a HASH key
                value:
                  TableName: ExampleTable
                  KeySchema:
                    - AttributeName: pk
                      KeyType: HASH
                  AttributeDefinitions:
                    - AttributeName: pk
                      AttributeType: S
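              # Additional illustrative examples; table and key values are hypothetical.
              GetItem:
                summary: Fetch one item by primary key
                value:
                  TableName: ExampleTable
                  Key:
                    pk: { S: "user#1" }
              Query:
                summary: Query a partition with a filter and a page size
                value:
                  TableName: ExampleTable
                  KeyConditionExpression: '#pk = :v'
                  FilterExpression: 'age > :min'
                  ExpressionAttributeNames: { '#pk': pk }
                  ExpressionAttributeValues:
                    ':v': { S: "user#1" }
                    ':min': { N: "21" }
                  Limit: 25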
      responses:
        '200':
          description: Successful operation response
          content:
            application/x-amz-json-1.0:
              schema:
                oneOf:
                  - $ref: '#/components/schemas/CreateTableResponse'
                  - $ref: '#/components/schemas/DeleteTableResponse'
                  - $ref: '#/components/schemas/DescribeTableResponse'
                  - $ref: '#/components/schemas/ListTablesResponse'
                  - $ref: '#/components/schemas/PutItemResponse'
                  - $ref: '#/components/schemas/GetItemResponseUnion'
                  - $ref: '#/components/schemas/DeleteItemResponse'
                  - $ref: '#/components/schemas/UpdateItemResponseUnion'
                  - $ref: '#/components/schemas/QueryResponse'
                  - $ref: '#/components/schemas/ScanResponse'
                  - $ref: '#/components/schemas/BatchWriteItemResponse'
                  - $ref: '#/components/schemas/BatchGetItemResponse'
                  - $ref: '#/components/schemas/TransactWriteItemsResponse'
                  - $ref: '#/components/schemas/TransactGetItemsResponse'
        '400':
          description: Client error (ValidationException, SerializationException, etc.)
          content:
            application/x-amz-json-1.0:
              schema:
                oneOf:
                  - $ref: '#/components/schemas/DynamoDbError'
                  - $ref: '#/components/schemas/TransactionCanceledException'
        '500':
          description: Server error
          content:
            application/x-amz-json-1.0:
              schema:
                $ref: '#/components/schemas/DynamoDbError'

components:
  parameters:
    XAmzTarget:
      name: X-Amz-Target
      in: header
      required: true
      description: |
        DynamoDB JSON protocol operation selector.
        JormunDB recognizes targets with the `DynamoDB_20120810.` prefix.
        Note: `UpdateTable` may be recognized but not implemented.
      schema:
        type: string
        enum:
          - DynamoDB_20120810.CreateTable
          - DynamoDB_20120810.DeleteTable
          - DynamoDB_20120810.DescribeTable
          - DynamoDB_20120810.ListTables
          - DynamoDB_20120810.UpdateTable
          - DynamoDB_20120810.PutItem
          - DynamoDB_20120810.GetItem
          - DynamoDB_20120810.DeleteItem
          - DynamoDB_20120810.UpdateItem
          - DynamoDB_20120810.Query
          - DynamoDB_20120810.Scan
          - DynamoDB_20120810.BatchGetItem
          - DynamoDB_20120810.BatchWriteItem
          - DynamoDB_20120810.TransactGetItems
          - DynamoDB_20120810.TransactWriteItems
      example: DynamoDB_20120810.GetItem

    XAmzDate:
      name: X-Amz-Date
      in: header
      required: false
      schema:
        type: string
      description: Optional SigV4 timestamp header (kept for SDK compatibility).

    Authorization:
      name: Authorization
      in: header
      required: false
      schema:
        type: string
      description: Optional SigV4 Authorization header (kept for SDK compatibility).

    XAmzSecurityToken:
      name: X-Amz-Security-Token
      in: header
      required: false
      schema:
        type: string
      description: Optional SigV4 session token header (kept for SDK compatibility).

    XAmzContentSha256:
      name: X-Amz-Content-Sha256
      in: header
      required: false
      schema:
        type: string
      description: Optional SigV4 payload hash header (kept for SDK compatibility).

  schemas:
    EmptyObject:
      type: object
      description: Empty JSON object.
      additionalProperties: false

    # -------------------------
    # AttributeValue & helpers
    # -------------------------
    AttributeValue:
      description: DynamoDB AttributeValue (JSON wire format).
      type: object
      minProperties: 1
      maxProperties: 1
      oneOf:
        - $ref: '#/components/schemas/AttrS'
        - $ref: '#/components/schemas/AttrN'
        - $ref: '#/components/schemas/AttrB'
        - $ref: '#/components/schemas/AttrBOOL'
        - $ref: '#/components/schemas/AttrNULL'
        - $ref: '#/components/schemas/AttrSS'
        - $ref: '#/components/schemas/AttrNS'
        - $ref: '#/components/schemas/AttrBS'
        - $ref: '#/components/schemas/AttrL'
        - $ref: '#/components/schemas/AttrM'

    AttrS:
      type: object
      additionalProperties: false
      required: [S]
      properties:
        S:
          type: string
          example: hello

    AttrN:
      type: object
      additionalProperties: false
      required: [N]
      properties:
        N:
          type: string
          description: Numeric values are encoded as strings in DynamoDB's JSON protocol.
          example: "42"

    AttrB:
      type: object
      additionalProperties: false
      required: [B]
      properties:
        B:
          type: string
          description: Base64-encoded binary value.
          example: AAECAwQ=

    AttrBOOL:
      type: object
      additionalProperties: false
      required: [BOOL]
      properties:
        BOOL:
          type: boolean
          example: true

    AttrNULL:
      type: object
      additionalProperties: false
      required: [NULL]
      properties:
        NULL:
          type: boolean
          enum: [true]
          example: true

    AttrSS:
      type: object
      additionalProperties: false
      required: [SS]
      properties:
        SS:
          type: array
          items: { type: string }
          example: [a, b]

    AttrNS:
      type: object
      additionalProperties: false
      required: [NS]
      properties:
        NS:
          type: array
          description: Numeric set values are encoded as strings.
          items: { type: string }
          example: ["1", "2"]

    AttrBS:
      type: object
      additionalProperties: false
      required: [BS]
      properties:
        BS:
          type: array
          description: Base64-encoded binary set values.
          items: { type: string }
          example: [AAE=, AgM=]

    AttrL:
      type: object
      additionalProperties: false
      required: [L]
      properties:
        L:
          type: array
          items:
            $ref: '#/components/schemas/AttributeValue'

    AttrM:
      type: object
      additionalProperties: false
      required: [M]
      properties:
        M:
          $ref: '#/components/schemas/AttributeMap'

    AttributeMap:
      type: object
      additionalProperties:
        $ref: '#/components/schemas/AttributeValue'
      example:
        pk: { S: "user#1" }
        sk: { S: "meta" }
        age: { N: "30" }

    ExpressionAttributeNames:
      type: object
      additionalProperties: { type: string }
      example:
        "#pk": "pk"

    ExpressionAttributeValues:
      type: object
      additionalProperties:
        $ref: '#/components/schemas/AttributeValue'
      example:
        ":v": { S: "user#1" }

    Key:
      allOf:
        - $ref: '#/components/schemas/AttributeMap'
      description: Primary key map (HASH, optionally RANGE) encoded as an AttributeMap.

    ReturnValues:
      type: string
      description: ReturnValues selector used by UpdateItem.
      enum: [NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW]
      example: ALL_NEW

    # -------------------------
    # Table shapes
    # -------------------------
    ScalarAttributeType:
      type: string
      enum: [S, N, B]
      example: S

    AttributeDefinition:
      type: object
      additionalProperties: false
      required: [AttributeName, AttributeType]
      properties:
        AttributeName: { type: string }
        AttributeType: { $ref: '#/components/schemas/ScalarAttributeType' }

    KeyType:
      type: string
      enum: [HASH, RANGE]
      example: HASH

    KeySchemaElement:
      type: object
      additionalProperties: false
      required: [AttributeName, KeyType]
      properties:
        AttributeName: { type: string }
        KeyType: { $ref: '#/components/schemas/KeyType' }

    ProjectionType:
      type: string
      enum: [KEYS_ONLY, INCLUDE, ALL]
      example: ALL

    Projection:
      type: object
      additionalProperties: false
      required: [ProjectionType]
      properties:
        ProjectionType: { $ref: '#/components/schemas/ProjectionType' }
        NonKeyAttributes:
          type: array
          items: { type: string }

    GlobalSecondaryIndex:
      type: object
      additionalProperties: false
      required: [IndexName, KeySchema, Projection]
      properties:
        IndexName: { type: string }
        KeySchema:
          type: array
          items: { $ref: '#/components/schemas/KeySchemaElement' }
          minItems: 1
        Projection: { $ref: '#/components/schemas/Projection' }

    TableStatus:
      type: string
      enum: [CREATING, UPDATING, DELETING, ACTIVE, ARCHIVING, ARCHIVED]
      example: ACTIVE

    TableDescription:
      type: object
      additionalProperties: false
      required: [TableName, TableStatus]
      properties:
        TableName: { type: string }
        TableStatus: { $ref: '#/components/schemas/TableStatus' }
        CreationDateTime:
          type: integer
          format: int64
          description: Unix epoch seconds.
        KeySchema:
          type: array
          items: { $ref: '#/components/schemas/KeySchemaElement' }
        AttributeDefinitions:
          type: array
          items: { $ref: '#/components/schemas/AttributeDefinition' }
        GlobalSecondaryIndexes:
          type: array
          items:
            allOf:
              - $ref: '#/components/schemas/GlobalSecondaryIndex'
              - type: object
                properties:
                  IndexStatus:
                    type: string
                    enum: [ACTIVE]

    # -------------------------
    # Error shapes
    # -------------------------
    DynamoDbError:
      type: object
      additionalProperties: false
      required: [__type, message]
      properties:
        __type:
          type: string
          description: DynamoDB error type identifier.
          example: com.amazonaws.dynamodb.v20120810#ValidationException
        message:
          type: string
          example: Invalid request

    TransactionCanceledException:
      type: object
      additionalProperties: false
      required: [__type, message, CancellationReasons]
      properties:
        __type:
          type: string
          enum: [com.amazonaws.dynamodb.v20120810#TransactionCanceledException]
        message:
          type: string
        CancellationReasons:
          type: array
          items:
            type: object
            additionalProperties: false
            required: [Code, Message]
            properties:
              Code: { type: string, example: ConditionalCheckFailed }
              Message: { type: string, example: The conditional request failed }

    # -------------------------
    # API: CreateTable
    # -------------------------
    CreateTableRequest:
      type: object
      additionalProperties: true
      required: [TableName, KeySchema, AttributeDefinitions]
      properties:
        TableName: { type: string }
        KeySchema:
          type: array
          items: { $ref: '#/components/schemas/KeySchemaElement' }
          minItems: 1
        AttributeDefinitions:
          type: array
          items: { $ref: '#/components/schemas/AttributeDefinition' }
          minItems: 1
        GlobalSecondaryIndexes:
          type: array
          items: { $ref: '#/components/schemas/GlobalSecondaryIndex' }
      description: |
        CreateTable request. JormunDB focuses on TableName, KeySchema, AttributeDefinitions, and optional GSI definitions.

    CreateTableResponse:
      type: object
      additionalProperties: false
      required: [TableDescription]
      properties:
        TableDescription:
          type: object
          additionalProperties: false
          required: [TableName, TableStatus, CreationDateTime]
          properties:
            TableName: { type: string }
            TableStatus: { $ref: '#/components/schemas/TableStatus' }
            CreationDateTime: { type: integer, format: int64 }

    # -------------------------
    # API: DeleteTable / DescribeTable / ListTables
    # -------------------------
    DeleteTableRequest:
      type: object
      additionalProperties: true
      required: [TableName]
      properties:
        TableName: { type: string }

    DeleteTableResponse:
      type: object
      additionalProperties: false
      required: [TableDescription]
      properties:
        TableDescription:
          type: object
          additionalProperties: false
          required: [TableName, TableStatus]
          properties:
            TableName: { type: string }
            TableStatus:
              type: string
              enum: [DELETING]

    DescribeTableRequest:
      type: object
      additionalProperties: true
      required: [TableName]
      properties:
        TableName: { type: string }

    DescribeTableResponse:
      type: object
      additionalProperties: false
      required: [Table]
      properties:
        Table: { $ref: '#/components/schemas/TableDescription' }

    ListTablesRequest:
      type: object
      additionalProperties: true
      description: ListTables request. JormunDB ignores request fields for this operation.

    ListTablesResponse:
      type: object
      additionalProperties: false
      required: [TableNames]
      properties:
        TableNames:
          type: array
          items: { type: string }

    # -------------------------
    # API: PutItem / GetItem / DeleteItem
    # -------------------------
    PutItemRequest:
      type: object
      additionalProperties: true
      required: [TableName, Item]
      properties:
        TableName: { type: string }
        Item: { $ref: '#/components/schemas/AttributeMap' }
        ConditionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }

    PutItemResponse:
      $ref: '#/components/schemas/EmptyObject'

    GetItemRequest:
      type: object
      additionalProperties: true
      required: [TableName, Key]
      properties:
        TableName: { type: string }
        Key: { $ref: '#/components/schemas/Key' }
        ProjectionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }

    GetItemResponse:
      type: object
      additionalProperties: false
      required: [Item]
      properties:
        Item: { $ref: '#/components/schemas/AttributeMap' }

    GetItemResponseUnion:
      oneOf:
        - $ref: '#/components/schemas/EmptyObject'
        - $ref: '#/components/schemas/GetItemResponse'

    DeleteItemRequest:
      type: object
      additionalProperties: true
      required: [TableName, Key]
      properties:
        TableName: { type: string }
        Key: { $ref: '#/components/schemas/Key' }
        ConditionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }

    DeleteItemResponse:
      $ref: '#/components/schemas/EmptyObject'

    # -------------------------
    # API: UpdateItem
    # -------------------------
    UpdateItemRequest:
      type: object
      additionalProperties: true
      required: [TableName, Key, UpdateExpression]
      properties:
        TableName: { type: string }
        Key: { $ref: '#/components/schemas/Key' }
        UpdateExpression: { type: string }
        ConditionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
        ReturnValues: { $ref: '#/components/schemas/ReturnValues' }

    UpdateItemResponse:
      type: object
      additionalProperties: false
      required: [Attributes]
      properties:
        Attributes: { $ref: '#/components/schemas/AttributeMap' }

    UpdateItemResponseUnion:
      oneOf:
        - $ref: '#/components/schemas/EmptyObject'
        - $ref: '#/components/schemas/UpdateItemResponse'

    # -------------------------
    # API: Query / Scan
    # -------------------------
    QueryRequest:
      type: object
      additionalProperties: true
      required: [TableName, KeyConditionExpression]
      properties:
        TableName: { type: string }
        IndexName: { type: string }
        KeyConditionExpression: { type: string }
        FilterExpression: { type: string }
        ProjectionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
        Limit:
          type: integer
          format: int32
          minimum: 1
          description: Maximum items to return (default 100 if omitted/0 in JormunDB).
        ExclusiveStartKey: { $ref: '#/components/schemas/Key' }
        ScanIndexForward:
          type: boolean
          description: Sort order for RANGE key queries (if applicable).

    ScanRequest:
      type: object
      additionalProperties: true
      required: [TableName]
      properties:
        TableName: { type: string }
        IndexName: { type: string }
        FilterExpression: { type: string }
        ProjectionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
        Limit:
          type: integer
          format: int32
          minimum: 1
          description: Maximum items to return (default 100 if omitted/0 in JormunDB).
        ExclusiveStartKey: { $ref: '#/components/schemas/Key' }

    ItemsPage:
      type: object
      additionalProperties: false
      required: [Items, Count, ScannedCount]
      properties:
        Items:
          type: array
          items: { $ref: '#/components/schemas/AttributeMap' }
        Count:
          type: integer
          format: int32
        ScannedCount:
          type: integer
          format: int32
        LastEvaluatedKey:
          $ref: '#/components/schemas/Key'
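      # Illustrative page (hypothetical values). LastEvaluatedKey is present
      # only when the storage layer produced a pagination token; clients feed
      # it back verbatim as ExclusiveStartKey to fetch the next page.
      example:
        Items:
          - pk: { S: "user#1" }
            sk: { S: "meta" }
        Count: 1
        ScannedCount: 3
        LastEvaluatedKey:
          pk: { S: "user#1" }
          sk: { S: "meta" }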

    QueryResponse:
      allOf:
        - $ref: '#/components/schemas/ItemsPage'

    ScanResponse:
      allOf:
        - $ref: '#/components/schemas/ItemsPage'

    # -------------------------
    # API: BatchWriteItem
    # -------------------------
    WriteRequest:
      type: object
      additionalProperties: false
      properties:
        PutRequest:
          type: object
          additionalProperties: false
          required: [Item]
          properties:
            Item: { $ref: '#/components/schemas/AttributeMap' }
        DeleteRequest:
          type: object
          additionalProperties: false
          required: [Key]
          properties:
            Key: { $ref: '#/components/schemas/Key' }
      oneOf:
        - required: [PutRequest]
        - required: [DeleteRequest]

    BatchWriteItemRequest:
      type: object
      additionalProperties: true
      required: [RequestItems]
      properties:
        RequestItems:
          type: object
          description: Map of table name to write requests.
          additionalProperties:
            type: array
            items: { $ref: '#/components/schemas/WriteRequest' }

    BatchWriteItemResponse:
      type: object
      additionalProperties: false
      required: [UnprocessedItems]
      properties:
        UnprocessedItems:
          type: object
          additionalProperties:
            type: array
            items: { $ref: '#/components/schemas/WriteRequest' }

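    # For reference, a hypothetical BatchWriteItem request body (shape per the
    # schemas above; table and key values are illustrative):
    #   RequestItems:
    #     ExampleTable:
    #       - PutRequest: { Item: { pk: { S: "user#1" } } }
    #       - DeleteRequest: { Key: { pk: { S: "user#2" } } }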
    # -------------------------
    # API: BatchGetItem
    # -------------------------
    KeysAndAttributes:
      type: object
      additionalProperties: true
      required: [Keys]
      properties:
        Keys:
          type: array
          items: { $ref: '#/components/schemas/Key' }
        ProjectionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }

    BatchGetItemRequest:
      type: object
      additionalProperties: true
      required: [RequestItems]
      properties:
        RequestItems:
          type: object
          additionalProperties:
            $ref: '#/components/schemas/KeysAndAttributes'

    BatchGetItemResponse:
      type: object
      additionalProperties: false
      required: [Responses, UnprocessedKeys]
      properties:
        Responses:
          type: object
          additionalProperties:
            type: array
            items: { $ref: '#/components/schemas/AttributeMap' }
        UnprocessedKeys:
          type: object
          additionalProperties:
            $ref: '#/components/schemas/KeysAndAttributes'

    # -------------------------
    # API: TransactWriteItems / TransactGetItems
    # -------------------------
    TransactWriteItemsRequest:
      type: object
      additionalProperties: true
      required: [TransactItems]
      properties:
        TransactItems:
          type: array
          minItems: 1
          maxItems: 100
          items:
            $ref: '#/components/schemas/TransactWriteItem'

    TransactWriteItem:
      type: object
      additionalProperties: false
      oneOf:
        - required: [Put]
        - required: [Delete]
        - required: [Update]
        - required: [ConditionCheck]
      properties:
        Put:
          $ref: '#/components/schemas/TransactPut'
        Delete:
          $ref: '#/components/schemas/TransactDelete'
        Update:
          $ref: '#/components/schemas/TransactUpdate'
        ConditionCheck:
          $ref: '#/components/schemas/TransactConditionCheck'

    TransactPut:
      type: object
      additionalProperties: true
      required: [TableName, Item]
      properties:
        TableName: { type: string }
        Item: { $ref: '#/components/schemas/AttributeMap' }
        ConditionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }

    TransactDelete:
      type: object
      additionalProperties: true
      required: [TableName, Key]
      properties:
        TableName: { type: string }
        Key: { $ref: '#/components/schemas/Key' }
        ConditionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }

    TransactUpdate:
      type: object
      additionalProperties: true
      required: [TableName, Key, UpdateExpression]
      properties:
        TableName: { type: string }
        Key: { $ref: '#/components/schemas/Key' }
        UpdateExpression: { type: string }
        ConditionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }

    TransactConditionCheck:
      type: object
      additionalProperties: true
      required: [TableName, Key, ConditionExpression]
      properties:
        TableName: { type: string }
        Key: { $ref: '#/components/schemas/Key' }
        ConditionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
        ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }

    TransactWriteItemsResponse:
      $ref: '#/components/schemas/EmptyObject'

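    # For reference, a hypothetical TransactWriteItems request body (shape per
    # the schemas above; values are illustrative):
    #   TransactItems:
    #     - Put:
    #         TableName: ExampleTable
    #         Item: { pk: { S: "user#1" }, balance: { N: "100" } }
    #     - ConditionCheck:
    #         TableName: ExampleTable
    #         Key: { pk: { S: "user#2" } }
    #         ConditionExpression: attribute_exists(pk)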
    TransactGetItemsRequest:
      type: object
      additionalProperties: true
      required: [TransactItems]
      properties:
        TransactItems:
          type: array
          minItems: 1
          maxItems: 100
          items:
            type: object
            additionalProperties: false
            required: [Get]
            properties:
              Get:
                $ref: '#/components/schemas/TransactGet'

    TransactGet:
      type: object
      additionalProperties: true
      required: [TableName, Key]
      properties:
        TableName: { type: string }
        Key: { $ref: '#/components/schemas/Key' }
        ProjectionExpression: { type: string }
        ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }

    TransactGetItemResult:
      oneOf:
        - $ref: '#/components/schemas/EmptyObject'
        - type: object
          additionalProperties: false
          required: [Item]
          properties:
            Item: { $ref: '#/components/schemas/AttributeMap' }

    TransactGetItemsResponse:
      type: object
      additionalProperties: false
      required: [Responses]
      properties:
        Responses:
          type: array
          items: { $ref: '#/components/schemas/TransactGetItemResult' }
@@ -267,7 +267,7 @@ parse_transact_put_action :: proc(
     if !tn_ok {
         return {}, false
     }
-    action.table_name = string(tn_str)
+    action.table_name = strings.clone(string(tn_str))

     // Item
     item_val, item_found := obj["Item"]
@@ -301,7 +301,7 @@ parse_transact_key_action :: proc(
     if !tn_ok {
         return {}, false
     }
-    action.table_name = string(tn_str)
+    action.table_name = strings.clone(string(tn_str))

     // Key
     key_val, key_found := obj["Key"]
@@ -335,7 +335,7 @@ parse_transact_update_action :: proc(
     if !tn_ok {
         return {}, false
     }
-    action.table_name = string(tn_str)
+    action.table_name = strings.clone(string(tn_str))

     // Key
     key_val, key_found := obj["Key"]
@@ -483,7 +483,9 @@ handle_transact_get_items :: proc(
     }

     // Build response
-    builder := strings.builder_make()
+    builder := strings.builder_make(context.allocator)
+    defer strings.builder_destroy(&builder)

     strings.write_string(&builder, `{"Responses":[`)

     for maybe_item, i in result.items {
@@ -492,8 +494,9 @@ handle_transact_get_items :: proc(
     }

     if item, has_item := maybe_item.?; has_item {
-        item_json := dynamodb.serialize_item(item)
-        fmt.sbprintf(&builder, `{{"Item":%s}}`, item_json)
+        strings.write_string(&builder, `{"Item":`)
+        dynamodb.serialize_item_to_builder(&builder, item)
+        strings.write_string(&builder, `}`)
     } else {
         strings.write_string(&builder, "{}")
     }
@@ -501,7 +504,8 @@ handle_transact_get_items :: proc(

     strings.write_string(&builder, "]}")

-    resp_body := strings.to_string(builder)
+    // Clone the string: the builder's buffer is freed by the deferred destroy above.
+    resp_body := strings.clone(strings.to_string(builder))
     response_set_body(response, transmute([]byte)resp_body)
 }

@@ -519,7 +523,7 @@ parse_transact_get_action :: proc(obj: json.Object) -> (dynamodb.Transact_Get_Ac
     if !tn_ok {
         return {}, false
     }
-    action.table_name = string(tn_str)
+    action.table_name = strings.clone(string(tn_str))

     // Key
     key_val, key_found := obj["Key"]
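The recurring change across these hunks swaps `serialize_item`, which allocated an intermediate JSON string per item, for `serialize_item_to_builder`, which appends directly into the response builder. A hedged sketch of the resulting pattern (the `Item` type and the serializer body are stand-ins for the dynamodb package's):

```odin
package sketch

import "core:strings"

Item :: distinct map[string]string // stand-in for dynamodb.Item

// Stand-in: the real serializer writes the item's DynamoDB JSON into b.
serialize_item_to_builder :: proc(b: ^strings.Builder, item: Item) {
    strings.write_string(b, "{}")
}

// Sketch of the handle_transact_get_items response loop above: one builder,
// no per-item intermediate strings, and a final clone so the response owns
// its bytes after the deferred builder_destroy frees the builder's buffer.
write_responses :: proc(items: []Maybe(Item)) -> string {
    builder := strings.builder_make()
    defer strings.builder_destroy(&builder)

    strings.write_string(&builder, `{"Responses":[`)
    for maybe_item, i in items {
        if i > 0 do strings.write_string(&builder, ",")
        if item, has_item := maybe_item.?; has_item {
            strings.write_string(&builder, `{"Item":`)
            serialize_item_to_builder(&builder, item)
            strings.write_string(&builder, `}`)
        } else {
            strings.write_string(&builder, "{}")
        }
    }
    strings.write_string(&builder, "]}")
    return strings.clone(strings.to_string(builder))
}
```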