Compare commits
37b423ce1a...master (50 commits)
Commits: 5ee3df86f1, 47eefd0fe5, 443562dfb6, 9cf54e1b9f, a7f2a5ab59, 6bc1a03347, 178b38fe18, b92dc61b08, 64da021148, 12ba2e57d7, a5a5d41e50, d8a80bd728, 225a1533cc, a6bf357228, 78a4ea7a0c, 228b422393, 96de080d10, a77676bbc7, 4404f2796d, 26281bc16d, 29fe8a60c3, f8b0b1c3ae, 089ef39bd9, 9518eb255e, f0d3eca5cb, c8ada180ce, 4b8e424085, 96896a0f97, 06ed6a2c97, 2b04e29331, 972e6ece5e, 31e80ac572, cd4ee1cbd7, ffd3eda63c, c6a78ca054, b510c000ec, 280ce15b07, 78b86d7534, 55e0082667, c1f72cee8b, 7a2f26b75d, 94296ae925, cf352dde23, 42db451349, c4da5ecc14, 61ce4e7b21, a68f5016c6, 416b9c9187, d318534782, ad599a0af7
.gitignore (vendored, 6 changes)

````diff
@@ -1,2 +1,4 @@
-./build
-./data
+build
+data
+project_context.txt
+jormundb-odin-project_context.txt
````
ARCHITECTURE.md (deleted; all 426 lines removed). Former contents:

## JormunDB Architecture

This document explains the internal architecture of JormunDB, including design decisions, storage formats, and the arena-per-request memory management pattern.

## Table of Contents

- [Overview](#overview)
- [Why Odin?](#why-odin)
- [Memory Management](#memory-management)
- [Storage Format](#storage-format)
- [Module Structure](#module-structure)
- [Request Flow](#request-flow)
- [Concurrency Model](#concurrency-model)

## Overview

JormunDB is a DynamoDB-compatible database server that speaks the DynamoDB wire protocol. It uses RocksDB for persistent storage and is written in Odin for elegant memory management.

### Key Design Goals

1. **Zero allocation ceremony** - No explicit `defer free()` or error handling for every allocation
2. **Binary storage** - Efficient TLV encoding instead of JSON
3. **API compatibility** - Drop-in replacement for DynamoDB Local
4. **Performance** - RocksDB-backed with efficient key encoding

## Why Odin?

The original implementation in Zig suffered from explicit allocator threading:

```zig
// Zig version - explicit allocator everywhere
fn handleRequest(allocator: std.mem.Allocator, request: []const u8) !Response {
    const parsed = try parseJson(allocator, request);
    defer parsed.deinit(allocator);

    const item = try storage.getItem(allocator, parsed.table_name, parsed.key);
    defer if (item) |i| freeItem(allocator, i);

    const response = try serializeResponse(allocator, item);
    defer allocator.free(response);

    return response; // Wait, we deferred the free!
}
```

Odin's context allocator system eliminates this:

```odin
// Odin version - implicit context allocator
handle_request :: proc(request: []byte) -> Response {
    // All allocations use context.allocator automatically
    parsed := parse_json(request)
    item := storage_get_item(parsed.table_name, parsed.key)
    response := serialize_response(item)

    return response
    // Everything freed when the arena is destroyed
}
```

## Memory Management

JormunDB uses a two-allocator strategy:

### 1. Arena Allocator (Request-Scoped)

Every HTTP request gets its own arena:

```odin
handle_connection :: proc(conn: net.TCP_Socket) {
    // Create arena for this request (4MB)
    arena: mem.Arena
    mem.arena_init(&arena, make([]byte, mem.Megabyte * 4))
    defer mem.arena_destroy(&arena)

    // Set context allocator
    context.allocator = mem.arena_allocator(&arena)

    // All downstream code uses context.allocator
    request := parse_http_request(conn)  // uses arena
    response := handle_request(request)  // uses arena
    send_response(conn, response)        // uses arena

    // Arena is freed here - everything cleaned up automatically
}
```

**Benefits:**
- No individual `free()` calls needed
- No `errdefer` cleanup
- No use-after-free bugs
- No memory leaks from forgotten frees
- Predictable performance (no GC pauses)

### 2. Default Allocator (Long-Lived Data)

The default allocator (typically `context.allocator` at program start) is used for the following (see the sketch after this list):

- Table metadata
- Table locks (`sync.RW_Mutex`)
- Engine state
- Items returned from the storage layer (copied into the request arena when needed)
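Long-lived allocations bypass the arena by passing an explicit allocator. A minimal sketch of what that looks like; the `register_table` helper and its fields are illustrative, not the actual storage engine code (the TODO notes only confirm that long-lived data uses an `engine.allocator`):

```odin
package longlived_example

import "core:mem"
import "core:strings"
import "core:sync"

Table_Meta :: struct {
	name: string,
	lock: ^sync.RW_Mutex,
}

// Hypothetical helper: allocate table metadata with the engine's
// long-lived allocator so it survives after the request arena is reset.
register_table :: proc(engine_allocator: mem.Allocator, name: string) -> ^Table_Meta {
	meta := new(Table_Meta, engine_allocator)
	meta.name = strings.clone(name, engine_allocator)
	meta.lock = new(sync.RW_Mutex, engine_allocator)
	return meta
}
```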
## Storage Format

### Binary Keys (Varint-Prefixed Segments)

All keys use varint length prefixes for space efficiency:

```
Meta key: [0x01][len][table_name]
Data key: [0x02][len][table_name][len][pk_value][len][sk_value]?
GSI key:  [0x03][len][table_name][len][index_name][len][gsi_pk][len][gsi_sk]?
LSI key:  [0x04][len][table_name][len][index_name][len][pk][len][lsi_sk]
```

**Example Data Key:**
```
Table: "Users"
PK: "user:123"
SK: "profile"

Encoded:
[0x02]    // Entity type (Data)
[0x05]    // Table name length (5)
Users     // Table name bytes
[0x08]    // PK length (8)
user:123  // PK bytes
[0x07]    // SK length (7)
profile   // SK bytes
```
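A minimal sketch of this encoding, simplified to single-byte lengths (a LEB128 varint is one byte for any length under 128); the real `key_codec` writes full varints:

```odin
package key_codec_example

// Build [0x02][len][table][len][pk][len][sk] with single-byte lengths.
// Illustrative only: segments of 128 bytes or more would need varint
// continuation bytes.
build_data_key_sketch :: proc(table, pk, sk: string) -> []byte {
	out := make([dynamic]byte)
	append(&out, 0x02) // entity type: Data
	segments := []string{table, pk, sk}
	for seg in segments {
		append(&out, byte(len(seg))) // length prefix
		append(&out, seg)            // segment bytes
	}
	return out[:]
}

// build_data_key_sketch("Users", "user:123", "profile") reproduces the
// example bytes above: 02 05 "Users" 08 "user:123" 07 "profile".
```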
### Item Encoding (TLV Format)

Items use Tag-Length-Value encoding for space efficiency:

```
Format:
[attr_count:varint]
[name_len:varint][name:bytes][type_tag:u8][value_len:varint][value:bytes]...

Type Tags:
String = 0x01  Number = 0x02  Binary = 0x03
Bool   = 0x04  Null   = 0x05
SS     = 0x10  NS     = 0x11  BS     = 0x12
List   = 0x20  Map    = 0x21
```

**Example Item:**
```json
{
  "id": {"S": "user123"},
  "age": {"N": "30"}
}
```

Encoded as:
```
[0x02]    // 2 attributes
[0x02]    // name length (2)
id        // name bytes
[0x01]    // type tag (String)
[0x07]    // value length (7)
user123   // value bytes

[0x03]    // name length (3)
age       // name bytes
[0x02]    // type tag (Number)
[0x02]    // value length (2)
30        // value bytes (stored as string)
```
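The same single-byte-length simplification makes the TLV layout easy to see in code. A sketch of the string and number cases only (illustrative; the real `item_codec` covers every type tag and uses full varints):

```odin
package item_codec_example

TAG_STRING: byte : 0x01
TAG_NUMBER: byte : 0x02

// One attribute: [name_len][name][type_tag][value_len][value]
encode_attr_sketch :: proc(out: ^[dynamic]byte, name: string, tag: byte, value: string) {
	append(out, byte(len(name)))
	append(out, name)
	append(out, tag)
	append(out, byte(len(value)))
	append(out, value)
}

// Encodes the example item {"id": {"S": "user123"}, "age": {"N": "30"}}.
encode_example_item :: proc() -> []byte {
	out := make([dynamic]byte)
	append(&out, byte(2)) // attribute count
	encode_attr_sketch(&out, "id", TAG_STRING, "user123")
	encode_attr_sketch(&out, "age", TAG_NUMBER, "30") // numbers travel as strings
	return out[:]
}
```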
## Module Structure

```
jormundb/
├── main.odin            # Entry point, HTTP server
├── rocksdb/             # RocksDB C FFI bindings
│   └── rocksdb.odin     # db_open, db_put, db_get, etc.
├── dynamodb/            # DynamoDB protocol implementation
│   ├── types.odin       # Core types (Attribute_Value, Item, Key, etc.)
│   ├── json.odin        # DynamoDB JSON parsing/serialization
│   ├── storage.odin     # Storage engine (CRUD, scan, query)
│   └── handler.odin     # HTTP request handlers
├── key_codec/           # Binary key encoding
│   └── key_codec.odin   # build_data_key, decode_data_key, etc.
└── item_codec/          # Binary TLV item encoding
    └── item_codec.odin  # encode, decode
```

## Request Flow

```
1. HTTP POST / arrives
        ↓
2. Create arena allocator (4MB)
   Set context.allocator = arena_allocator
        ↓
3. Parse HTTP headers
   Extract X-Amz-Target → Operation
        ↓
4. Parse JSON body
   Convert DynamoDB JSON → internal types
        ↓
5. Route to handler (e.g., handle_put_item)
        ↓
6. Storage engine operation
   - Build binary key
   - Encode item to TLV
   - RocksDB put/get/delete
        ↓
7. Build response
   - Serialize item to DynamoDB JSON
   - Format HTTP response
        ↓
8. Send response
        ↓
9. Destroy arena
   All request memory freed automatically
```
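Step 5 is a dispatch on the `X-Amz-Target` header. A minimal sketch of the routing shape; the handler names match the module listing above, but their bodies here are stubs and the error `__type` string is an assumption:

```odin
package routing_example

Response :: struct {
	status: int,
	body:   string,
}

handle_put_item :: proc(body: []byte) -> Response { return {200, "{}"} }
handle_get_item :: proc(body: []byte) -> Response { return {200, "{}"} }

// Map the X-Amz-Target header value to a handler.
route_request :: proc(target: string, body: []byte) -> Response {
	switch target {
	case "DynamoDB_20120810.PutItem":
		return handle_put_item(body)
	case "DynamoDB_20120810.GetItem":
		return handle_get_item(body)
	case:
		// Unknown operation → DynamoDB-style error (type string assumed)
		return Response{400, `{"__type":"com.amazonaws.dynamodb.v20120810#UnknownOperationException"}`}
	}
}
```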
## Concurrency Model

### Table-Level RW Locks

Each table has a reader-writer lock (usage sketch below):

```odin
Storage_Engine :: struct {
	db:                rocksdb.DB,
	table_locks:       map[string]^sync.RW_Mutex,
	table_locks_mutex: sync.Mutex,
}
```

**Read Operations** (GetItem, Query, Scan):
- Acquire shared lock
- Multiple readers can run concurrently
- Writers are blocked

**Write Operations** (PutItem, DeleteItem, UpdateItem):
- Acquire exclusive lock
- Only one writer at a time
- All readers are blocked
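Concretely, reads take the shared side of the lock and writes take the exclusive side. A minimal sketch using `core:sync` (the lock procs are Odin's actual API; the surrounding procs are illustrative):

```odin
package lock_example

import "core:sync"

// Read path (GetItem, Query, Scan): many readers may hold the lock at once.
read_with_table_lock :: proc(lock: ^sync.RW_Mutex) {
	sync.rw_mutex_shared_lock(lock)
	defer sync.rw_mutex_shared_unlock(lock)
	// ... point lookup / iterate RocksDB ...
}

// Write path (PutItem, DeleteItem, UpdateItem): exclusive access.
write_with_table_lock :: proc(lock: ^sync.RW_Mutex) {
	sync.rw_mutex_lock(lock)
	defer sync.rw_mutex_unlock(lock)
	// ... encode item, write to RocksDB ...
}
```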
### Thread Safety

- RocksDB handles are thread-safe (column family-based)
- Table metadata is protected by locks
- Request arenas are thread-local (no sharing)

## Error Handling

Odin uses explicit error returns via `or_return`:

```odin
// Odin error handling
parse_json :: proc(data: []byte) -> (Item, bool) {
    parsed := json.parse(data) or_return
    item := json_to_item(parsed) or_return
    return item, true
}

// Usage
item, ok := parse_json(request.body)
if !ok {
    return error_response(.ValidationException, "Invalid JSON")
}
```

No exceptions, no panic-recover patterns. Every error path is explicit.
## DynamoDB Wire Protocol

### Request Format

```
POST / HTTP/1.1
X-Amz-Target: DynamoDB_20120810.PutItem
Content-Type: application/x-amz-json-1.0

{
  "TableName": "Users",
  "Item": {
    "id": {"S": "user123"},
    "name": {"S": "Alice"}
  }
}
```

### Response Format

```
HTTP/1.1 200 OK
Content-Type: application/x-amz-json-1.0
x-amzn-RequestId: local-request-id

{}
```

### Error Format

```json
{
  "__type": "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException",
  "message": "Table not found"
}
```
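Serializing that error body is a small formatting helper. The TODO notes mention a `make_error_response` proc; a sketch of roughly what its core must do (this body is an assumption, not the actual implementation):

```odin
package error_example

import "core:fmt"

// __type carries the namespaced error name; message the human-readable detail.
make_error_body :: proc(error_type, message: string) -> string {
	return fmt.aprintf(
		`{"__type":"com.amazonaws.dynamodb.v20120810#%s","message":"%s"}`,
		error_type, message,
	)
}

// make_error_body("ResourceNotFoundException", "Table not found")
// produces exactly the error format shown above.
```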
## Performance Characteristics

### Time Complexity

| Operation | Complexity | Notes |
|-----------|-----------|-------|
| PutItem | O(log n) | RocksDB LSM tree insert |
| GetItem | O(log n) | RocksDB point lookup |
| DeleteItem | O(log n) | RocksDB deletion |
| Query | O(log n + m) | n = items in table, m = result set |
| Scan | O(n) | Full table scan |

### Space Complexity

- Binary keys: ~20-100 bytes (vs 50-200 bytes JSON)
- Binary items: ~30% smaller than JSON
- Varint encoding saves space on small integers

### Benchmarks (Expected)

Based on Zig version performance:

```
Operation          Throughput    Latency (p50)
PutItem            ~5,000/sec    ~0.2ms
GetItem            ~7,000/sec    ~0.14ms
Query (1 item)     ~8,000/sec    ~0.12ms
Scan (1000 items)  ~20/sec       ~50ms
```

## Future Enhancements

### Planned Features

1. **UpdateExpression** - SET/REMOVE/ADD/DELETE operations
2. **FilterExpression** - Post-query filtering
3. **ProjectionExpression** - Return subset of attributes
4. **Global Secondary Indexes** - Query by non-key attributes
5. **Local Secondary Indexes** - Alternate sort keys
6. **BatchWriteItem** - Batch mutations
7. **BatchGetItem** - Batch reads
8. **Transactions** - ACID multi-item operations

### Optimization Opportunities

1. **Connection pooling** - Reuse HTTP connections
2. **Bloom filters** - Faster negative lookups
3. **Compression** - LZ4/Zstd on large items
4. **Caching layer** - Hot item cache
5. **Parallel scan** - Segment-based scanning

## Debugging

### Enable Verbose Logging

```bash
make run VERBOSE=1
```

### Inspect RocksDB

```bash
# Use ldb tool to inspect database
ldb --db=./data scan
ldb --db=./data get <key_hex>
```

### Memory Profiling

Odin's tracking allocator can detect leaks:

```odin
when ODIN_DEBUG {
    track: mem.Tracking_Allocator
    mem.tracking_allocator_init(&track, context.allocator)
    context.allocator = mem.tracking_allocator(&track)

    defer {
        for _, leak in track.allocation_map {
            fmt.printfln("Leaked %d bytes at %v", leak.size, leak.location)
        }
    }
}
```

## Migration from Zig Version

The Zig version (ZynamoDB) used the same binary storage format, so existing RocksDB databases can be read by JormunDB without migration.

### Compatibility

- ✅ Binary key format (byte-compatible)
- ✅ Binary item format (byte-compatible)
- ✅ Table metadata (JSON, compatible)
- ✅ HTTP wire protocol (identical)

### Breaking Changes

None - JormunDB can open ZynamoDB databases directly.

---

## Contributing

When contributing to JormunDB:

1. **Use the context allocator** - All request-scoped allocations should use `context.allocator`
2. **Avoid manual frees** - Let the arena handle it
3. **Long-lived data** - Use the default allocator explicitly
4. **Test thoroughly** - Run `make test` before committing
5. **Format code** - Run `make fmt` before committing

## References

- [Odin Language](https://odin-lang.org/)
- [RocksDB Wiki](https://github.com/facebook/rocksdb/wiki)
- [DynamoDB API Reference](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/)
- [Varint Encoding](https://developers.google.com/protocol-buffers/docs/encoding#varints)
Makefile (38 changes)

````diff
@@ -14,8 +14,10 @@ SHIM_HDRS := $(SHIM_DIR)/rocksdb_shim.h
 
 CXX := g++
 AR := ar
-CXXFLAGS := -O2 -fPIC -std=c++17 $(INCLUDE_PATH)
+CXXFLAGS := -O2 -fPIC -std=c++20 $(INCLUDE_PATH)
 
+# name of the docker compose file for the python tests
+SDK_TEST_COMPOSE := docker-compose-python-sdk-test.yaml
 
 # RocksDB and compression libraries
 ROCKSDB_LIBS := -lrocksdb -lstdc++ -lsnappy -llz4 -lzstd -lz -lbz2
@@ -41,7 +43,7 @@ COMMON_FLAGS := -vet -strict-style
 EXTRA_LINKER_FLAGS := $(LIB_PATH) $(SHIM_LIB) $(ROCKSDB_LIBS)
 
 # Runtime configuration
-PORT ?= 8000
+PORT ?= 8002
 HOST ?= 0.0.0.0
 DATA_DIR ?= ./data
 VERBOSE ?= 0
@@ -145,35 +147,6 @@ check-deps:
 	@pkg-config --exists rocksdb || (echo "$(RED)✗ RocksDB not found$(NC)" && exit 1)
 	@echo "$(GREEN)✓ All dependencies found$(NC)"
 
-# AWS CLI test commands
-aws-test: run &
-	@sleep 2
-	@echo "$(BLUE)Testing with AWS CLI...$(NC)"
-	@echo "\n$(YELLOW)Creating table...$(NC)"
-	@aws dynamodb create-table \
-		--endpoint-url http://localhost:$(PORT) \
-		--table-name TestTable \
-		--key-schema AttributeName=pk,KeyType=HASH \
-		--attribute-definitions AttributeName=pk,AttributeType=S \
-		--billing-mode PAY_PER_REQUEST || true
-	@echo "\n$(YELLOW)Listing tables...$(NC)"
-	@aws dynamodb list-tables --endpoint-url http://localhost:$(PORT)
-	@echo "\n$(YELLOW)Putting item...$(NC)"
-	@aws dynamodb put-item \
-		--endpoint-url http://localhost:$(PORT) \
-		--table-name TestTable \
-		--item '{"pk":{"S":"test1"},"data":{"S":"hello world"}}'
-	@echo "\n$(YELLOW)Getting item...$(NC)"
-	@aws dynamodb get-item \
-		--endpoint-url http://localhost:$(PORT) \
-		--table-name TestTable \
-		--key '{"pk":{"S":"test1"}}'
-	@echo "\n$(YELLOW)Scanning table...$(NC)"
-	@aws dynamodb scan \
-		--endpoint-url http://localhost:$(PORT) \
-		--table-name TestTable
-	@echo "\n$(GREEN)✓ AWS CLI test complete$(NC)"
 
 # Development workflow
 dev: clean build run
@@ -191,14 +164,13 @@ help:
 	@echo " make clean - Remove build artifacts"
 	@echo ""
 	@echo "$(GREEN)Run Commands:$(NC)"
-	@echo " make run - Build and run server (default: localhost:8000)"
+	@echo " make run - Build and run server (default: localhost:8002)"
 	@echo " make run PORT=9000 - Run on custom port"
 	@echo " make dev - Clean, build, and run"
 	@echo " make quick - Fast rebuild and run"
 	@echo ""
 	@echo "$(GREEN)Test Commands:$(NC)"
 	@echo " make test - Run unit tests"
-	@echo " make aws-test - Test with AWS CLI commands"
 	@echo ""
 	@echo "$(GREEN)Utility Commands:$(NC)"
 	@echo " make fmt - Format source code"
````
````diff
@@ -101,7 +101,7 @@ export PATH=$PATH:/path/to/odin
 ### Basic Usage
 
 ```bash
-# Run with defaults (localhost:8000, ./data directory)
+# Run with defaults (localhost:8002, ./data directory)
 make run
 ```
 
@@ -118,10 +118,10 @@ You should see:
 ║                                               ║
 ╚═══════════════════════════════════════════════╝
 
-Port: 8000 | Data Dir: ./data
+Port: 8002 | Data Dir: ./data
 
 Storage engine initialized at ./data
-Starting DynamoDB-compatible server on 0.0.0.0:8000
+Starting DynamoDB-compatible server on 0.0.0.0:8002
 Ready to accept connections!
 ```
 
@@ -170,10 +170,10 @@ sudo apt install awscli
 aws --version
 ```
 
-### Configure AWS CLI (for local use)
+### Configure AWS CLI
 
 ```bash
-# Set dummy credentials (required but not checked by JormunDB)
+# Set dummy credentials (required but not checked by JormunDB yet)
 aws configure
 # AWS Access Key ID: dummy
 # AWS Secret Access Key: dummy
@@ -186,7 +186,7 @@ aws configure
 **Create a Table:**
 ```bash
 aws dynamodb create-table \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --key-schema \
     AttributeName=id,KeyType=HASH \
@@ -197,13 +197,13 @@ aws dynamodb create-table \
 
 **List Tables:**
 ```bash
-aws dynamodb list-tables --endpoint-url http://localhost:8000
+aws dynamodb list-tables --endpoint-url http://localhost:8002
 ```
 
 **Put an Item:**
 ```bash
 aws dynamodb put-item \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --item '{
     "id": {"S": "user123"},
@@ -216,7 +216,7 @@ aws dynamodb put-item \
 **Get an Item:**
 ```bash
 aws dynamodb get-item \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --key '{"id": {"S": "user123"}}'
 ```
@@ -224,7 +224,7 @@ aws dynamodb get-item \
 **Query Items:**
 ```bash
 aws dynamodb query \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --key-condition-expression "id = :id" \
   --expression-attribute-values '{
@@ -235,14 +235,14 @@ aws dynamodb query \
 **Scan Table:**
 ```bash
 aws dynamodb scan \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users
 ```
 
 **Delete an Item:**
 ```bash
 aws dynamodb delete-item \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --key '{"id": {"S": "user123"}}'
 ```
@@ -250,7 +250,7 @@ aws dynamodb delete-item \
 **Delete a Table:**
 ```bash
 aws dynamodb delete-table \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users
 ```
 
@@ -262,7 +262,7 @@ aws dynamodb delete-table \
 const { DynamoDBClient, PutItemCommand, GetItemCommand } = require("@aws-sdk/client-dynamodb");
 
 const client = new DynamoDBClient({
-  endpoint: "http://localhost:8000",
+  endpoint: "http://localhost:8002",
   region: "us-east-1",
   credentials: {
     accessKeyId: "dummy",
@@ -299,7 +299,7 @@ import boto3
 
 dynamodb = boto3.client(
     'dynamodb',
-    endpoint_url='http://localhost:8000',
+    endpoint_url='http://localhost:8002',
     region_name='us-east-1',
    aws_access_key_id='dummy',
     aws_secret_access_key='dummy'
@@ -364,8 +364,8 @@ make fmt
 ### Port Already in Use
 
 ```bash
-# Check what's using port 8000
-lsof -i :8000
+# Check what's using port 8002
+lsof -i :8002
 
 # Use a different port
 make run PORT=9000
@@ -404,16 +404,12 @@ brew upgrade odin  # macOS
 
 ## Next Steps
 
 - Read [ARCHITECTURE.md](ARCHITECTURE.md) for internals
 - Check [TODO.md](TODO.md) for implementation status
 - Browse source code in `dynamodb/`, `rocksdb/`, etc.
 - Contribute! See [CONTRIBUTING.md](CONTRIBUTING.md)
 
 ## Getting Help
 
-- **Issues**: https://github.com/yourusername/jormundb/issues
-- **Discussions**: https://github.com/yourusername/jormundb/discussions
-- **Odin Discord**: https://discord.gg/sVBPHEv
+There is absolutely no support at this time.
 
 ## Benchmarking
 
@@ -426,16 +422,12 @@ make profile
 
 # Load test
 ab -n 10000 -c 100 -p item.json -T application/json \
-  http://localhost:8000/
+  http://localhost:8002/
 ```
 
 ## Production Deployment
 
-JormunDB is designed for **local development only**. For production, use:
-
-- AWS DynamoDB (managed service)
-- DynamoDB Accelerator (DAX)
-- ScyllaDB (DynamoDB-compatible)
+JormunDB is not ready for production use just yet. But there will be easy package installs, as well as a container and a Helm chart.
 
 ## Uninstalling
 
@@ -449,9 +441,3 @@ make uninstall
 # Remove data directory
 rm -rf ./data
 ```
 
----
-
-**Happy coding! 🚀**
-
-For questions or issues, please open a GitHub issue or join our Discord.
````
README.md (115 changes)

````diff
@@ -1,20 +1,22 @@
-# JormunDB
+<p align="center">
+  <img src="https://artifacts.ewr1.vultrobjects.com/jormundb.png" alt="JormunDB logo" width="220" />
+</p>
 
+<h1 align="center">JormunDB</h1>
 
+<p align="center">
+  A high-performance, DynamoDB-compatible database server written in Odin, backed by RocksDB.
+  <br />
+  <strong>DynamoDB-Compatible Database</strong> · Powered by <strong>RocksDB</strong> + <strong>Odin</strong>
+</p>
 
-```
-╦╔═╗╦═╗╔╦╗╦ ╦╔╗╔╔╦╗╔╗
-║║ ║╠╦╝║║║║ ║║║║ ║║╠╩╗
-╚╝╚═╝╩╚═╩ ╩╚═╝╝╚╝═╩╝╚═╝
-DynamoDB-Compatible Database
-Powered by RocksDB + Odin
-```
 ---
 
 ## What is JormunDB?
 
-JormunDB (formerly ZynamoDB) is a local DynamoDB replacement that speaks the DynamoDB wire protocol. Point your AWS SDK or CLI at it and use it as a drop-in development database.
+JormunDB is a self-hosted DynamoDB replacement that speaks the DynamoDB wire protocol. Point your AWS SDK or CLI at it and use it as a drop-in replacement.
 
-**Why Odin?** The original Zig implementation suffered from explicit allocator threading—every function taking an `allocator` parameter, every allocation needing `errdefer` cleanup. Odin's implicit context allocator system eliminates this ceremony: one `context.allocator = arena_allocator` at the request handler entry and everything downstream just works.
+**Why Odin?** The original Zig implementation suffered from explicit allocator threading, where every function ended up needing an `allocator` parameter and every allocation needed `errdefer` cleanup. Odin's implicit context allocator system eliminates this ceremony. Just one `context.allocator = arena_allocator` at the request handler entry, and it feels more like working with ctx in Go instead of filling out tax forms.
 
 ## Features
@@ -55,7 +57,7 @@ sudo apt install librocksdb-dev libsnappy-dev liblz4-dev libzstd-dev libbz2-dev
 # Build the server
 make build
 
-# Run with default settings (localhost:8000, ./data directory)
+# Run with default settings (localhost:8002, ./data directory)
 make run
 
 # Run with custom port
@@ -70,7 +72,7 @@ make run DATA_DIR=/tmp/jormundb
 ```bash
 # Create a table
 aws dynamodb create-table \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --key-schema AttributeName=id,KeyType=HASH \
   --attribute-definitions AttributeName=id,AttributeType=S \
@@ -78,26 +80,26 @@ aws dynamodb create-table \
 
 # Put an item
 aws dynamodb put-item \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --item '{"id":{"S":"user123"},"name":{"S":"Alice"},"age":{"N":"30"}}'
 
 # Get an item
 aws dynamodb get-item \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --key '{"id":{"S":"user123"}}'
 
 # Query items
 aws dynamodb query \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users \
   --key-condition-expression "id = :id" \
   --expression-attribute-values '{":id":{"S":"user123"}}'
 
 # Scan table
 aws dynamodb scan \
-  --endpoint-url http://localhost:8000 \
+  --endpoint-url http://localhost:8002 \
   --table-name Users
 ```
 
@@ -117,21 +119,6 @@ Binary encoding → Disk
 JSON response → Client
 ```
 
-### Module Structure
-
-```
-jormundb/
-├── rocksdb/    - C FFI bindings to librocksdb
-├── dynamodb/   - Core types and operations
-│   ├── types.odin   - AttributeValue, Item, Key, etc.
-│   ├── json.odin    - DynamoDB JSON serialization
-│   ├── storage.odin - Storage engine with RocksDB
-│   └── handler.odin - HTTP request handlers
-├── key_codec/  - Binary key encoding (varint-prefixed)
-├── item_codec/ - Binary TLV item encoding
-└── main.odin   - HTTP server and entry point
-```
-
 ### Storage Format
 
 **Keys** (varint-length-prefixed segments):
@@ -202,16 +189,38 @@ make run PORT=9000 DATA_DIR=/tmp/db VERBOSE=1
 
 ## Performance
 
-From benchmarks on the original Zig version (Odin expected to be similar or better):
+Benchmarked on a single node over localhost, 1,000 iterations per test.
 
-```
-Sequential Writes | 10000 ops | 245.32 ms |  40765 ops/sec
-Random Reads      | 10000 ops | 312.45 ms |  32006 ops/sec
-Batch Writes      | 10000 ops |  89.23 ms | 112071 ops/sec
-PutItem           |  5000 ops | 892.34 ms |   5604 ops/sec
-GetItem           |  5000 ops | 678.91 ms |   7365 ops/sec
-Scan (full table) |  5000 ops | 234.56 ms |  21320 ops/sec
-```
+### Basic Operations
+
+| Operation | Throughput | Avg Latency | P95 Latency | P99 Latency |
+|-----------|------------|-------------|-------------|-------------|
+| **PutItem** | 1,021 ops/sec | 0.98ms | 1.02ms | 1.64ms |
+| **GetItem** | 1,207 ops/sec | 0.83ms | 0.90ms | 1.14ms |
+| **Query** | 1,002 ops/sec | 1.00ms | 1.11ms | 1.85ms |
+| **Scan** (100 items) | 18,804 ops/sec | 0.05ms | - | - |
+| **DeleteItem** | 1,254 ops/sec | 0.80ms | - | - |
+
+### Batch Operations
+
+| Operation | Throughput | Batch Size |
+|-----------|------------|------------|
+| **BatchWriteItem** | 9,297 ops/sec | 25 items |
+| **BatchGetItem** | 9,113 ops/sec | 25 items |
+
+### Concurrent Operations
+
+| Workers | Throughput | Avg Latency | P95 Latency | P99 Latency |
+|---------|------------|-------------|-------------|-------------|
+| **10 concurrent** | 1,286 ops/sec | 7.70ms | 15.16ms | 19.72ms |
+
+### Large Payloads
+
+| Payload Size | Throughput | Avg Latency |
+|--------------|------------|-------------|
+| **10KB** | 522 ops/sec | 1.91ms |
+| **50KB** | 166 ops/sec | 6.01ms |
+| **100KB** | 96 ops/sec | 10.33ms |
 
 ## API Compatibility
 
@@ -226,24 +235,24 @@ Scan (full table) | 5000 ops | 234.56 ms | 21320 ops/sec
 - ✅ DeleteItem
 - ✅ Query (with KeyConditionExpression)
 - ✅ Scan (with pagination)
+- ✅ ConditionExpression
+- ✅ FilterExpression
+- ✅ ProjectionExpression
+- ✅ BatchWriteItem
+- ✅ BatchGetItem
+- ✅ Global Secondary Indexes
 
 ### Coming Soon
 
-- ⏳ UpdateItem (with UpdateExpression)
-- ⏳ BatchWriteItem
-- ⏳ BatchGetItem
-- ⏳ Global Secondary Indexes
+- ⏳ UpdateItem (works, but needs UPDATED_NEW/UPDATED_OLD response filtering for full DynamoDB parity)
 - ⏳ Local Secondary Indexes
-- ⏳ ConditionExpression
-- ⏳ FilterExpression
-- ⏳ ProjectionExpression
 
 ## Configuration
 
 ### Environment Variables
 
 ```bash
-JORMUN_PORT=8000       # Server port
+JORMUN_PORT=8002       # Server port (I have something locally on port 8000 so now everyone has to use port 8002)
 JORMUN_HOST=0.0.0.0    # Bind address
 JORMUN_DATA_DIR=./data # RocksDB data directory
 JORMUN_VERBOSE=1       # Enable verbose logging
 ```
````
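On the Odin side these variables can be read with `core:os`; a minimal sketch (the variable name comes from the block above, the helper itself is illustrative):

```odin
package config_example

import "core:os"
import "core:strconv"

// Read JORMUN_PORT, falling back to the documented default.
server_port :: proc() -> int {
	if value, found := os.lookup_env("JORMUN_PORT"); found {
		if port, ok := strconv.parse_int(value); ok {
			return port
		}
	}
	return 8002
}
```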
````diff
@@ -275,7 +284,7 @@ chmod 755 ./data
 Check if the port is already in use:
 
 ```bash
-lsof -i :8000
+lsof -i :8002
 ```
 
 ### "Invalid JSON" errors
@@ -292,13 +301,9 @@ Ensure you're using the correct DynamoDB JSON format:
 }
 ```
 
 ## License
 
 MIT License - see LICENSE file for details.
 
 ## Credits
 
-- Inspired by DynamoDB Local
+- Inspired by DynamoDB
 - Built with [Odin](https://odin-lang.org/)
 - Powered by [RocksDB](https://rocksdb.org/)
 - Originally implemented as ZynamoDB in Zig
@@ -314,4 +319,4 @@ Contributions welcome! Please:
 
 ---
 
-**Why "Jormun"?** Jörmungandr, the World Serpent from Norse mythology—a fitting name for something that wraps around your data. Also, it sounds cool.
+**Why "Jormun"?** Jörmungandr, the World Serpent from Norse mythology, which I found fitting for something built in a language called Odin. Also, it sounds cool.
````
TODO.md (240 changes)

````diff
@@ -1,186 +1,78 @@
-# JormunDB Implementation TODO
+# JormunDB (Odin rewrite) — TODO
 
-This tracks the rewrite from Zig to Odin and remaining features.
+This tracks what's left to stabilize and extend the project.
 
-## ✅ Completed
-
-- [x] Project structure
-- [x] Makefile with build/run/test targets
-- [x] README with usage instructions
-- [x] ARCHITECTURE documentation
-- [x] RocksDB FFI bindings (rocksdb/rocksdb.odin)
-- [x] Core types (dynamodb/types.odin)
-- [x] Key codec with varint encoding (key_codec/key_codec.odin)
-- [x] Main entry point with arena pattern demo
-- [x] LICENSE file
-- [x] .gitignore
+## Now (MVP correctness + polish)
+Goal: "aws cli works reliably for CreateTable/ListTables/PutItem/GetItem/DeleteItem/Scan/Query" with correct DynamoDB-ish responses.
 
-## 🚧 In Progress (Need to Complete)
+### 1) HTTP + routing hardening
+- [ ] Audit request parsing boundaries:
+  - Max body size enforcement — **DONE**
+  - Missing/invalid headers → correct DynamoDB error types
+  - Content-Type handling (be permissive but consistent)
+- [x] Ensure **all request-scoped allocations** come from the request arena (no accidental long-lived allocs)
+  - Verified: `handle_connection` in http.odin sets `context.allocator = request_alloc`
+  - Long-lived data (table metadata, locks) explicitly uses `engine.allocator`
+- [x] Standardize error responses:
+  - `__type` formatting — done, uses `com.amazonaws.dynamodb.v20120810#ErrorType`
+  - `message` field consistency — done
+  - Status code mapping per error type — **DONE**: centralized `handle_storage_error` + `make_error_response` now maps InternalServerError→500, everything else→400
+  - Missing X-Amz-Target now returns `SerializationException` (matches real DynamoDB)
 
-### Core Modules
+### 2) Storage correctness edge cases
+- [x] Table metadata durability + validation:
+  - [x] Reject duplicate tables — done in `create_table` (checks existing meta key)
+  - [x] Reject invalid key schema — done in `parse_key_schema` (no HASH, multiple HASH, etc.)
+- [x] Item validation against key schema:
+  - [x] Missing PK/SK errors — done in `key_from_item`
+  - [x] Type mismatch errors (S/N/B) — **DONE**: new `validate_item_key_types` proc checks item key attr types against AttributeDefinitions
+- [ ] Deterministic encoding tests:
+  - [ ] Key codec round-trip
+  - [ ] TLV item encode/decode round-trip (nested maps/lists/sets)
 
-- [ ] **dynamodb/json.odin** - DynamoDB JSON parsing and serialization
-  - Parse `{"S": "value"}` format
-  - Serialize AttributeValue to DynamoDB JSON
-  - Parse request bodies (PutItem, GetItem, etc.)
+### 3) Query/Scan pagination parity
+- [x] Make pagination behavior match AWS CLI expectations:
+  - [x] `Limit` — done
+  - [x] `ExclusiveStartKey` — done (parsed via JSON object lookup with key schema type reconstruction)
+  - [x] `LastEvaluatedKey` generation — **FIXED**: now saves key of *last returned item* (not next unread item); only emits when more results exist
+- [ ] Add "golden" pagination tests:
+  - [ ] Query w/ sort key ranges
+  - [ ] Scan limit + resume loop
 
-- [ ] **item_codec/item_codec.odin** - Binary TLV encoding for items
-  - Encode Item to binary TLV format
-  - Decode binary TLV back to Item
-  - Type tag handling for all DynamoDB types
+### 4) Expression parsing reliability
+- [x] Remove brittle string-scanning for `KeyConditionExpression` extraction:
+  - **DONE**: `parse_key_condition_expression_string` uses JSON object lookup (handles whitespace/ordering safely)
+- [ ] Add validation + better errors for malformed expressions
+- [x] Expand operator coverage: BETWEEN and begins_with are implemented in parser
+- [x] **Sort key condition filtering in query** — **DONE**: `query()` now accepts optional `Sort_Key_Condition` and applies it (=, <, <=, >, >=, BETWEEN, begins_with)
 
-- [ ] **dynamodb/storage.odin** - Storage engine with RocksDB
-  - Table metadata management
-  - create_table, delete_table, describe_table, list_tables
-  - put_item, get_item, delete_item
-  - scan, query with pagination
-  - Table-level RW locks
+### 5) Service Features
+- [ ] Configuration settings like environment variables for defining users and credentials
+- [ ] Configuration settings for setting up master and replica nodes
 
-- [ ] **dynamodb/handler.odin** - HTTP request handlers
-  - Route X-Amz-Target to handler functions
-  - handle_create_table, handle_put_item, etc.
-  - Build responses with proper error handling
-  - Arena allocator integration
+### 6) Test coverage / tooling
+- [ ] Add integration tests mirroring AWS CLI script flows:
+  - create table → put → get → scan → query → delete
+- [ ] Add fuzz-ish tests for:
+  - JSON parsing robustness
+  - expression parsing robustness
+  - TLV decode failure cases (corrupt bytes)
 
-### HTTP Server
+### 7) Secondary indexes
+- [x] Global Secondary Indexes (GSI)
+- [ ] Local Secondary Indexes (LSI)
+- [ ] Index backfill (existing data when GSI added to populated table)
+- [x] Write-path maintenance (GSI)
 
-- [ ] **HTTP server implementation**
-  - Accept TCP connections
-  - Parse HTTP POST requests
-  - Read JSON bodies
-  - Send HTTP responses with headers
-  - Keep-alive support
-  - Options:
-    - Use `core:net` directly
-    - Use C FFI with libmicrohttpd
-    - Use Odin's vendor:microui (if suitable)
+### 8) Performance / ops
+- [ ] Connection reuse / keep-alive tuning
+- [ ] Bloom filters / RocksDB options tuning for common patterns
+- [ ] Optional compression policy (LZ4/Zstd knobs)
+- [ ] Parallel scan (segment scanning)
 
-### Expression Parsers (Priority 3)
-
-- [ ] **KeyConditionExpression parser**
-  - Tokenizer for expressions
-  - Parse `pk = :pk AND sk > :sk`
-  - Support begins_with, BETWEEN
-  - ExpressionAttributeNames/Values
-
-- [ ] **UpdateExpression parser** (later)
-  - SET operations
-  - REMOVE operations
-  - ADD operations
-  - DELETE operations
-
-## 📋 Testing
-
-- [ ] Unit tests for key_codec
-- [ ] Unit tests for item_codec
-- [ ] Unit tests for JSON parsing
-- [ ] Integration tests with AWS CLI
-- [ ] Benchmark suite
-
-## 🔧 Build & Tooling
-
-- [ ] Verify Makefile works on macOS
-- [ ] Verify Makefile works on Linux
-- [ ] Add Docker support (optional)
-- [ ] Add install script
-
-## 📚 Documentation
-
-- [ ] Code comments for public APIs
-- [ ] Usage examples in README
-- [ ] API compatibility matrix
-- [ ] Performance tuning guide
-
-## 🎯 Priority Order
-
-1. **HTTP Server** - Need this to accept requests
-2. **JSON Parsing** - Need this to understand DynamoDB format
-3. **Storage Engine** - Core CRUD operations
-4. **Handlers** - Wire everything together
-5. **Item Codec** - Efficient binary storage
-6. **Expression Parsers** - Query functionality
-
-## 📝 Notes
-
-### Zig → Odin Translation Patterns
-
-**Memory Management:**
-```zig
-// Zig
-const item = try allocator.create(Item);
-defer allocator.destroy(item);
-```
-```odin
-// Odin
-item := new(Item)
-// No defer needed if using arena
-```
-
-**Error Handling:**
-```zig
-// Zig
-fn foo() !Result {
-    return error.Failed;
-}
-const x = try foo();
-```
-```odin
-// Odin
-foo :: proc() -> (Result, bool) {
-    return {}, false
-}
-x := foo() or_return
-```
-
-**Slices:**
-```zig
-// Zig
-const slice: []const u8 = data;
-```
-```odin
-// Odin
-slice: []byte = data
-```
-
-**Maps:**
-```zig
-// Zig
-var map = std.StringHashMap(Value).init(allocator);
-defer map.deinit();
-```
-```odin
-// Odin ("map" is a keyword, so use another name)
-m := make(map[string]Value)
-defer delete(m)
-```
-
-### Key Decisions
-
-1. **Use `Maybe(T)` instead of `?T`** - Odin's optional type
-2. **Use `or_return` instead of `try`** - Odin's error propagation
-3. **Use `context.allocator`** - Implicit allocator from context
-4. **Use `#partial switch`** - For union type checking
-5. **Use `transmute`** - For zero-cost type conversions
-
-### Reference Zig Files
-
-When implementing, reference these Zig files:
-- `src/dynamodb/json.zig` - 400 lines, DynamoDB JSON format
-- `src/dynamodb/storage.zig` - 460 lines, storage engine
-- `src/dynamodb/handler.zig` - 500+ lines, request handlers
-- `src/item_codec.zig` - 350 lines, TLV encoding
-- `src/http.zig` - 250 lines, HTTP server
-
-### Quick Test Commands
-
-```bash
-# Build and test
-make build
-make test
-
-# Run server
-make run
-
-# Test with AWS CLI
-aws dynamodb list-tables --endpoint-url http://localhost:8000
-```
+### 9) Replication / WAL
+(There is a C++ shim stubbed out for WAL iteration and applying write batches.)
+- [ ] Implement WAL iterator: `latest_sequence`, `wal_iter_next` returning writebatch blob
+- [ ] Implement apply-writebatch on follower
+- [ ] Add a minimal replication test harness (leader generates N ops → follower applies → compare)
````
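The replication plan above implies a follower catch-up loop. A sketch with guessed signatures (none of these procs exist yet; the names come from the bullet list, everything else is an assumption):

```odin
package replication_example

// Assumed FFI surface per the TODO notes; the real shim signatures may differ.
latest_sequence   :: proc(db: rawptr) -> u64 { return 0 }
wal_iter_next     :: proc(db: rawptr, seq: ^u64) -> (batch: []byte, ok: bool) { return nil, false }
apply_write_batch :: proc(db: rawptr, batch: []byte) -> bool { return true }

// Follower catch-up: pull write batches from the leader's WAL and apply
// them in sequence order until the leader's latest sequence is reached.
catch_up :: proc(leader_db, follower_db: rawptr, from_seq: u64) {
	seq := from_seq
	target := latest_sequence(leader_db)
	for seq < target {
		batch, ok := wal_iter_next(leader_db, &seq)
		if !ok do break
		if !apply_write_batch(follower_db, batch) do break
	}
}
```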
````diff
@@ -1,16 +1,16 @@
 #!/bin/bash
 
 # Output file
-OUTPUT_FILE="project_context.txt"
+OUTPUT_FILE="jormundb-odin-project_context.txt"
 
 # Directories to exclude
 EXCLUDE_DIRS=("build" "data" ".git")
 
 # File extensions to include (add more as needed)
-INCLUDE_EXTENSIONS=("odin" "Makefile" "md")
+INCLUDE_EXTENSIONS=("odin" "Makefile" "md" "json" "h" "cc")
 
 # Special files to include (without extension)
-INCLUDE_FILES=("ols.json" "Makefile" "build.odin.zon")
+INCLUDE_FILES=("Makefile")
 
 # Clear the output file
 > "$OUTPUT_FILE"
````
dynamodb/batch.odin (new file, 262 lines)

```odin
// BatchWriteItem and BatchGetItem storage operations
//
// BatchWriteItem: Puts or deletes multiple items across one or more tables.
//   - Up to 25 items per batch (DynamoDB limit)
//   - Each item is an independent PutRequest or DeleteRequest
//   - Partial failures are reported via UnprocessedItems
//
// BatchGetItem: Retrieves multiple items from one or more tables.
//   - Up to 100 items per batch (DynamoDB limit)
//   - Each table request contains a list of Keys
//   - Partial failures reported via UnprocessedKeys
package dynamodb

// ============================================================================
// BatchWriteItem Types
// ============================================================================

Write_Request_Type :: enum {
	Put,
	Delete,
}

Write_Request :: struct {
	type: Write_Request_Type,
	item: Item, // For Put: the full item. For Delete: the key item.
}

Batch_Write_Table_Request :: struct {
	table_name: string,
	requests:   []Write_Request,
}

Batch_Write_Result :: struct {
	// UnprocessedItems — requests that failed and should be retried.
	// For now we process everything or return an error, so this is
	// typically empty. Populated only on partial failures.
	unprocessed: [dynamic]Batch_Write_Table_Request,
}

batch_write_result_destroy :: proc(result: ^Batch_Write_Result) {
	for &table_req in result.unprocessed {
		for &req in table_req.requests {
			item_destroy(&req.item)
		}
		delete(table_req.requests)
	}
	delete(result.unprocessed)
}

// ============================================================================
// BatchWriteItem — Execute a batch of put/delete operations
//
// DynamoDB semantics:
//   - Operations within a batch are NOT atomic (some may succeed, some fail)
//   - Each operation is validated independently
//   - Failed operations go into UnprocessedItems
//   - Limit: 25 operations total across all tables
// ============================================================================

batch_write_item :: proc(
	engine: ^Storage_Engine,
	table_requests: []Batch_Write_Table_Request,
) -> (Batch_Write_Result, Storage_Error) {
	result := Batch_Write_Result{
		unprocessed = make([dynamic]Batch_Write_Table_Request),
	}

	// Count total operations across all tables
	total_ops := 0
	for table_req in table_requests {
		total_ops += len(table_req.requests)
	}

	// Enforce DynamoDB limit: 25 operations per batch
	if total_ops > 25 {
		return result, .Validation_Error
	}

	for table_req in table_requests {
		failed_requests := make([dynamic]Write_Request)

		for req in table_req.requests {
			var_err: Storage_Error

			switch req.type {
			case .Put:
				var_err = put_item(engine, table_req.table_name, req.item)
			case .Delete:
				var_err = delete_item(engine, table_req.table_name, req.item)
			}

			// Distinguish validation errors from transient failures
			if var_err != .None {
				#partial switch var_err {
				case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error:
					// Hard validation errors — fail the entire batch
					batch_write_result_destroy(&result)
					delete(failed_requests)
					return result, var_err

				case .Table_Not_Found:
					// Non-existent table is a hard request failure, not a retryable condition.
					// DynamoDB returns ResourceNotFoundException for the whole request.
					batch_write_result_destroy(&result)
					delete(failed_requests)
					return result, .Table_Not_Found

				case .RocksDB_Error, .Item_Not_Found:
					// Genuinely transient/infrastructure errors — add to UnprocessedItems.
					failed_item := item_deep_copy(req.item)
					append(&failed_requests, Write_Request{
						type = req.type,
						item = failed_item,
					})

				case .None, .Validation_Error, .Internal_Error:
					// Should not happen, but handle gracefully
					failed_item := item_deep_copy(req.item)
					append(&failed_requests, Write_Request{
						type = req.type,
						item = failed_item,
					})
				}
			}
		}

		if len(failed_requests) > 0 {
			append(&result.unprocessed, Batch_Write_Table_Request{
				table_name = table_req.table_name,
				requests   = failed_requests[:],
			})
		} else {
			delete(failed_requests)
		}
	}

	return result, .None
}

// ============================================================================
// BatchGetItem Types
// ============================================================================

Batch_Get_Table_Request :: struct {
	table_name: string,
	keys:       []Item,
}

Batch_Get_Table_Result :: struct {
	table_name: string,
	items:      []Item,
}

Batch_Get_Result :: struct {
	responses:        [dynamic]Batch_Get_Table_Result,
	unprocessed_keys: [dynamic]Batch_Get_Table_Request,
}

batch_get_result_destroy :: proc(result: ^Batch_Get_Result) {
	for &table_result in result.responses {
		for &item in table_result.items {
			item_destroy(&item)
		}
		delete(table_result.items)
	}
	delete(result.responses)

	for &table_req in result.unprocessed_keys {
		for &key in table_req.keys {
			item_destroy(&key)
		}
		delete(table_req.keys)
	}
	delete(result.unprocessed_keys)
}

// ============================================================================
// BatchGetItem — Retrieve multiple items from one or more tables
//
// DynamoDB semantics:
//   - Each key is fetched independently
//   - Missing items are silently omitted (no error)
//   - Failed lookups go into UnprocessedKeys
//   - Limit: 100 keys total across all tables
// ============================================================================

batch_get_item :: proc(
	engine: ^Storage_Engine,
	table_requests: []Batch_Get_Table_Request,
) -> (Batch_Get_Result, Storage_Error) {
	result := Batch_Get_Result{
		responses        = make([dynamic]Batch_Get_Table_Result),
		unprocessed_keys = make([dynamic]Batch_Get_Table_Request),
	}

	// Count total keys across all tables
	total_keys := 0
	for table_req in table_requests {
		total_keys += len(table_req.keys)
	}

	// Enforce DynamoDB limit: 100 keys per batch
	if total_keys > 100 {
		return result, .Validation_Error
	}

	for table_req in table_requests {
		found_items := make([dynamic]Item)
		failed_keys := make([dynamic]Item)

		for key in table_req.keys {
			item_result, get_err := get_item(engine, table_req.table_name, key)

			// Distinguish validation errors from transient failures
			if get_err != .None && get_err != .Item_Not_Found {
				#partial switch get_err {
				case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error:
					// Hard validation error — fail the entire batch
					batch_get_result_destroy(&result)
					delete(found_items)
					delete(failed_keys)
					return result, get_err

				case .RocksDB_Error, .Table_Not_Found:
					// Transient error — add to unprocessed
					append(&failed_keys, item_deep_copy(key))
					continue

				case .None, .Validation_Error, .Internal_Error, .Item_Not_Found:
					// Should not happen here, but handle gracefully
					continue
				}
			}

			if item_val, has_item := item_result.?; has_item {
				append(&found_items, item_val)
			}
			// If item not found, silently omit (DynamoDB behavior)
		}

		if len(found_items) > 0 {
			append(&result.responses, Batch_Get_Table_Result{
				table_name = table_req.table_name,
				items      = found_items[:],
			})
		} else {
			delete(found_items)
		}

		if len(failed_keys) > 0 {
			append(&result.unprocessed_keys, Batch_Get_Table_Request{
				table_name = table_req.table_name,
				keys       = failed_keys[:],
			})
		} else {
			delete(failed_keys)
		}
	}

	return result, .None
}
```
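A sketch of how a handler might drive `batch_write_item` (illustrative only; `Item` construction depends on types.odin and is elided, so the item arrives as a parameter here):

```odin
// Illustrative usage — not part of the actual handlers.
example_batch_put :: proc(engine: ^Storage_Engine, user_item: Item) {
	requests := []Batch_Write_Table_Request{
		{
			table_name = "Users",
			requests   = []Write_Request{{type = .Put, item = user_item}},
		},
	}

	result, err := batch_write_item(engine, requests)
	defer batch_write_result_destroy(&result)

	if err != .None {
		return // map the Storage_Error to a DynamoDB error response here
	}
	if len(result.unprocessed) > 0 {
		// echo these back as UnprocessedItems so the client can retry
	}
}
```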
dynamodb/condition.odin (new file, 119 lines)

```odin
// ConditionExpression support for PutItem, DeleteItem, and UpdateItem
//
// ConditionExpression uses the same grammar as FilterExpression but is evaluated
// against the *existing* item (before the mutation). If the condition evaluates
// to false, the operation is rejected with ConditionalCheckFailedException.
//
// When there is no existing item:
//   - attribute_not_exists(path) → true (attribute doesn't exist on a non-existent item)
//   - attribute_exists(path) → false
//   - All comparisons → false (no attribute to compare)
//
// This file provides:
//   1. parse_condition_expression_string — extract ConditionExpression from JSON body
//   2. evaluate_condition — evaluate parsed condition against an item
//   3. Condition_Result — result enum for condition evaluation
package dynamodb

import "core:encoding/json"
import "core:strings"

// ============================================================================
// Condition Evaluation Result
// ============================================================================

Condition_Result :: enum {
	Passed,      // Condition met (or no condition specified)
	Failed,      // Condition not met → ConditionalCheckFailedException
	Parse_Error, // Malformed ConditionExpression → ValidationException
}

// ============================================================================
// Request Parsing
// ============================================================================

// Extract the raw ConditionExpression string from the request body.
parse_condition_expression_string :: proc(request_body: []byte) -> (expr: string, ok: bool) {
	data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
	if parse_err != nil {
		return
	}
	defer json.destroy_value(data)

	root, root_ok := data.(json.Object)
	if !root_ok {
		return
	}

	ce_val, found := root["ConditionExpression"]
	if !found {
		return
	}

	ce_str, str_ok := ce_val.(json.String)
	if !str_ok {
		return
	}

	expr = strings.clone(string(ce_str))
	ok = true
	return
}

// ============================================================================
// Full Condition Evaluation Pipeline
//
// Parses ConditionExpression + ExpressionAttributeNames/Values from the
// request body, then evaluates against the existing item.
//
// Parameters:
//   request_body  — full JSON request body
//   existing_item — the item currently in the database (nil if no item exists)
//   attr_names    — pre-parsed ExpressionAttributeNames (caller may already have these)
//   attr_values   — pre-parsed ExpressionAttributeValues
//
// Returns Condition_Result:
//   .Passed      — no ConditionExpression, or condition evaluated to true
//   .Failed      — condition evaluated to false
//   .Parse_Error — ConditionExpression is malformed
// ============================================================================

evaluate_condition_expression :: proc(
	request_body: []byte,
	existing_item: Maybe(Item),
	attr_names: Maybe(map[string]string),
	attr_values: map[string]Attribute_Value,
) -> Condition_Result {
	// Extract ConditionExpression string
	condition_str, has_condition := parse_condition_expression_string(request_body)
	if !has_condition {
		return .Passed // No condition → always pass
	}
	defer delete(condition_str)

	// Parse the condition into a filter tree (same grammar as FilterExpression)
	filter_node, parse_ok := parse_filter_expression(condition_str, attr_names, attr_values)
	if !parse_ok || filter_node == nil {
		return .Parse_Error
	}
	defer filter_node_destroy(filter_node)

	// If there is no existing item, build an empty item for evaluation.
	// This means attribute_not_exists → true, attribute_exists → false,
	// all comparisons → false (attribute not found).
	eval_item: Item
	if item, has_item := existing_item.?; has_item {
		eval_item = item
	} else {
		// Empty item — no attributes exist
		eval_item = Item{}
	}

	if evaluate_filter(eval_item, filter_node) {
		return .Passed
	}

	return .Failed
}
```
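A sketch of where this sits in a write path: check the condition against the current item, then mutate only on `.Passed` (illustrative only; real handlers would also map each result onto the proper DynamoDB error response):

```odin
// Illustrative usage — not part of the actual handlers.
example_conditional_put :: proc(
	engine: ^Storage_Engine,
	body: []byte,
	table: string,
	new_item: Item,
	key: Item,
	attr_values: map[string]Attribute_Value,
) -> Storage_Error {
	existing, _ := get_item(engine, table, key)

	switch evaluate_condition_expression(body, existing, nil, attr_values) {
	case .Passed:
		return put_item(engine, table, new_item)
	case .Failed:
		// surface as ConditionalCheckFailedException
		return .Validation_Error
	case .Parse_Error:
		// surface as ValidationException
		return .Validation_Error
	}
	return .None
}
```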
dynamodb/expression.odin (new file, 520 lines; excerpt)

```odin
// DynamoDB Expression Parser
// Parses KeyConditionExpression with ExpressionAttributeNames and ExpressionAttributeValues
// Supports: pk = :pk, pk = :pk AND sk > :sk, begins_with(sk, :prefix), BETWEEN, etc.
package dynamodb

import "core:encoding/json"
import "core:strings"

// ============================================================================
// Sort Key Condition Operators
// ============================================================================

Sort_Key_Operator :: enum {
	EQ,          // =
	LT,          // <
	LE,          // <=
	GT,          // >
	GE,          // >=
	BETWEEN,     // BETWEEN x AND y
	BEGINS_WITH, // begins_with(sk, prefix)
}

// ============================================================================
// Parsed Structures
// ============================================================================

Sort_Key_Condition :: struct {
	sk_name:  string,
	operator: Sort_Key_Operator,
	value:    Attribute_Value,
	value2:   Maybe(Attribute_Value),
}

sort_key_condition_destroy :: proc(skc: ^Sort_Key_Condition) {
	delete(skc.sk_name) // Free the cloned string
	attr_value_destroy(&skc.value)
	if v2, ok := skc.value2.?; ok {
		v2_copy := v2
		attr_value_destroy(&v2_copy)
	}
}

Key_Condition :: struct {
	pk_name:      string,
	pk_value:     Attribute_Value,
	sk_condition: Maybe(Sort_Key_Condition),
}

key_condition_destroy :: proc(kc: ^Key_Condition) {
	delete(kc.pk_name) // Free the cloned string
	attr_value_destroy(&kc.pk_value)
	if skc, ok := kc.sk_condition.?; ok {
		skc_copy := skc
		sort_key_condition_destroy(&skc_copy)
	}
}

// Get the raw partition key value bytes for building storage keys
key_condition_get_pk_bytes :: proc(kc: ^Key_Condition) -> ([]byte, bool) {
	#partial switch v in kc.pk_value {
	case String:
		return transmute([]byte)string(v), true
	case DDB_Number:
		// Use canonical encoding for numbers in keys!
		return encode_ddb_number_for_sort(v), true
	case Binary:
		return transmute([]byte)string(v), true
	}
	return nil, false
}

// ============================================================================
// Tokenizer
// ============================================================================

Tokenizer :: struct {
	input: string,
	pos:   int,
}

tokenizer_init :: proc(input: string) -> Tokenizer {
	return Tokenizer{input = input, pos = 0}
}

tokenizer_next :: proc(t: ^Tokenizer) -> Maybe(string) {
	// Skip whitespace
	for t.pos < len(t.input) && is_whitespace(t.input[t.pos]) {
		t.pos += 1
	}

	if t.pos >= len(t.input) {
		return nil
	}

	start := t.pos
	c := t.input[t.pos]

	// Single-character tokens
	if c == '(' || c == ')' || c == ',' {
		t.pos += 1
		return t.input[start:t.pos]
	}

	// Two-character operators
	if t.pos + 1 < len(t.input) {
		two := t.input[t.pos:t.pos + 2]
		if two == "<=" || two == ">=" || two == "<>" {
			t.pos += 2
			return two
		}
	}

	// Single-character operators
	if c == '=' || c == '<' || c == '>' || c == '+' || c == '-' {
		t.pos += 1
		return t.input[start:t.pos]
	}

	// Identifier or keyword (includes :placeholder and #name)
	for t.pos < len(t.input) && is_ident_char(t.input[t.pos]) {
		t.pos += 1
	}

	if t.pos > start {
		return t.input[start:t.pos]
	}

	// Unknown character, skip it
	t.pos += 1
	return tokenizer_next(t)
}

@(private = "file")
is_whitespace :: proc(c: byte) -> bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
```
|
||||
}
|
||||
|
||||
@(private = "file")
|
||||
is_ident_char :: proc(c: byte) -> bool {
|
||||
return (c >= 'a' && c <= 'z') ||
|
||||
(c >= 'A' && c <= 'Z') ||
|
||||
(c >= '0' && c <= '9') ||
|
||||
c == '_' || c == ':' || c == '#' || c == '.'
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helper: convert Maybe(string) tokens into (string, bool) so or_return works.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Package-visible: used by update.odin and filter.odin
|
||||
next_token :: proc(t: ^Tokenizer) -> (tok: string, ok: bool) {
|
||||
if v, has := tokenizer_next(t).?; has {
|
||||
tok = v
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
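// Example (illustrative): tokenizing a key condition expression.
//
//	t := tokenizer_init("pk = :pk AND begins_with(#s, :prefix)")
//	for {
//		tok, ok := next_token(&t)
//		if !ok do break
//		// yields, in order: "pk", "=", ":pk", "AND", "begins_with", "(", "#s", ",", ":prefix", ")"
//	}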
|
||||
// ============================================================================
|
||||
// Expression Parsing
|
||||
// ============================================================================
|
||||
|
||||
parse_key_condition_expression :: proc(
|
||||
expression: string,
|
||||
attribute_names: Maybe(map[string]string),
|
||||
attribute_values: map[string]Attribute_Value,
|
||||
) -> (kc: Key_Condition, ok: bool) {
|
||||
t := tokenizer_init(expression)
|
||||
|
||||
pk_name_token := next_token(&t) or_return
|
||||
pk_name_unowned := resolve_attribute_name(pk_name_token, attribute_names) or_return
|
||||
pk_name := strings.clone(pk_name_unowned) // Clone for safe storage
|
||||
|
||||
eq_token := next_token(&t) or_return
|
||||
if eq_token != "=" {
|
||||
delete(pk_name) // free on error
|
||||
return
|
||||
}
|
||||
|
||||
pk_value_token := next_token(&t) or_return
|
||||
pk_value, pk_ok := resolve_attribute_value(pk_value_token, attribute_values)
|
||||
if !pk_ok {
|
||||
delete(pk_name) // free on error
|
||||
return
|
||||
}
|
||||
|
||||
sk_condition: Maybe(Sort_Key_Condition) = nil
|
||||
|
||||
// Optional "AND ..."
|
||||
if and_token, has_and := tokenizer_next(&t).?; has_and {
|
||||
if !strings.equal_fold(and_token, "AND") {
|
||||
delete(pk_name) // free on error
|
||||
attr_value_destroy(&pk_value)
|
||||
return
|
||||
}
|
||||
|
||||
skc, skc_ok := parse_sort_key_condition(&t, attribute_names, attribute_values)
|
||||
if !skc_ok {
|
||||
delete(pk_name) // free on error
|
||||
attr_value_destroy(&pk_value)
|
||||
return
|
||||
}
|
||||
sk_condition = skc
|
||||
}
|
||||
|
||||
// Verify all tokens were consumed (no trailing garbage)
|
||||
if trailing := tokenizer_next(&t); trailing != nil {
|
||||
delete(pk_name)
|
||||
attr_value_destroy(&pk_value)
|
||||
if skc, has_skc := sk_condition.?; has_skc {
|
||||
skc_copy := skc
|
||||
sort_key_condition_destroy(&skc_copy)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
kc = Key_Condition{
|
||||
pk_name = pk_name,
|
||||
pk_value = pk_value,
|
||||
sk_condition = sk_condition,
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
@(private = "file")
|
||||
parse_sort_key_condition :: proc(
|
||||
t: ^Tokenizer,
|
||||
attribute_names: Maybe(map[string]string),
|
||||
attribute_values: map[string]Attribute_Value,
|
||||
) -> (skc: Sort_Key_Condition, ok: bool) {
|
||||
first_token := next_token(t) or_return
|
||||
|
||||
if strings.equal_fold(first_token, "begins_with") {
|
||||
skc, ok = parse_begins_with(t, attribute_names, attribute_values)
|
||||
return
|
||||
}
|
||||
|
||||
sk_name_unowned := resolve_attribute_name(first_token, attribute_names) or_return
|
||||
sk_name := strings.clone(sk_name_unowned) // Clone for safe storage
|
||||
|
||||
op_token := next_token(t) or_return
|
||||
operator, op_ok := parse_operator(op_token)
|
||||
if !op_ok {
|
||||
delete(sk_name) // free on error
|
||||
return
|
||||
}
|
||||
|
||||
value_token := next_token(t) or_return
|
||||
value, val_ok := resolve_attribute_value(value_token, attribute_values)
|
||||
if !val_ok {
|
||||
delete(sk_name) // free on error
|
||||
return
|
||||
}
|
||||
|
||||
value2: Maybe(Attribute_Value) = nil
|
||||
if operator == .BETWEEN {
|
||||
// IMPORTANT: after allocating `value`, do NOT use `or_return` without cleanup.
|
||||
and_token, tok_ok := next_token(t)
|
||||
if !tok_ok || !strings.equal_fold(and_token, "AND") {
|
||||
delete(sk_name) // free on error
|
||||
attr_value_destroy(&value)
|
||||
return
|
||||
}
|
||||
|
||||
value2_token, tok2_ok := next_token(t)
|
||||
if !tok2_ok {
|
||||
delete(sk_name) // free on error
|
||||
attr_value_destroy(&value)
|
||||
return
|
||||
}
|
||||
|
||||
v2, v2_ok := resolve_attribute_value(value2_token, attribute_values)
|
||||
if !v2_ok {
|
||||
delete(sk_name) // free on error
|
||||
attr_value_destroy(&value)
|
||||
return
|
||||
}
|
||||
value2 = v2
|
||||
}
|
||||
|
||||
skc = Sort_Key_Condition{
|
||||
sk_name = sk_name,
|
||||
operator = operator,
|
||||
value = value,
|
||||
value2 = value2,
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
@(private = "file")
|
||||
parse_begins_with :: proc(
|
||||
t: ^Tokenizer,
|
||||
attribute_names: Maybe(map[string]string),
|
||||
attribute_values: map[string]Attribute_Value,
|
||||
) -> (skc: Sort_Key_Condition, ok: bool) {
|
||||
lparen := next_token(t) or_return
|
||||
if lparen != "(" {
|
||||
return
|
||||
}
|
||||
|
||||
sk_name_token := next_token(t) or_return
|
||||
sk_name_unowned := resolve_attribute_name(sk_name_token, attribute_names) or_return
|
||||
sk_name := strings.clone(sk_name_unowned) // Clone for safe storage
|
||||
|
||||
comma := next_token(t) or_return
|
||||
if comma != "," {
|
||||
delete(sk_name) // free on error
|
||||
return
|
||||
}
|
||||
|
||||
value_token := next_token(t) or_return
|
||||
value, val_ok := resolve_attribute_value(value_token, attribute_values)
|
||||
if !val_ok {
|
||||
delete(sk_name) // free on error
|
||||
return
|
||||
}
|
||||
|
||||
// after allocating `value`, avoid `or_return` so we can clean up
|
||||
rparen, tok_ok := next_token(t)
|
||||
if !tok_ok || rparen != ")" {
|
||||
delete(sk_name) // free on error
|
||||
attr_value_destroy(&value)
|
||||
return
|
||||
}
|
||||
|
||||
skc = Sort_Key_Condition{
|
||||
sk_name = sk_name,
|
||||
operator = .BEGINS_WITH,
|
||||
value = value,
|
||||
value2 = nil,
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
@(private = "file")
|
||||
parse_operator :: proc(token: string) -> (Sort_Key_Operator, bool) {
|
||||
if token == "=" do return .EQ, true
|
||||
if token == "<" do return .LT, true
|
||||
if token == "<=" do return .LE, true
|
||||
if token == ">" do return .GT, true
|
||||
if token == ">=" do return .GE, true
|
||||
if strings.equal_fold(token, "BETWEEN") do return .BETWEEN, true
|
||||
return .EQ, false
|
||||
}
|
||||
|
||||
// Package-visible: used by update.odin and filter.odin
|
||||
resolve_attribute_name :: proc(token: string, names: Maybe(map[string]string)) -> (string, bool) {
|
||||
if len(token) > 0 && token[0] == '#' {
|
||||
if n, has_names := names.?; has_names {
|
||||
if resolved, found := n[token]; found {
|
||||
return resolved, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
return token, true
|
||||
}
|
||||
|
||||
// Package-visible: used by update.odin and filter.odin
|
||||
resolve_attribute_value :: proc(
|
||||
token: string,
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (Attribute_Value, bool) {
|
||||
if len(token) > 0 && token[0] == ':' {
|
||||
if original, found := values[token]; found {
|
||||
return attr_value_deep_copy(original), true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Request Parsing Helpers
|
||||
// ============================================================================
|
||||
|
||||
parse_expression_attribute_names :: proc(request_body: []byte) -> Maybe(map[string]string) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return nil
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, ok := data.(json.Object)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
names_val, found := root["ExpressionAttributeNames"]
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
|
||||
names_obj, names_ok := names_val.(json.Object)
|
||||
if !names_ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make(map[string]string)
|
||||
|
||||
for key, val in names_obj {
|
||||
str, str_ok := val.(json.String)
|
||||
if !str_ok {
|
||||
continue
|
||||
}
|
||||
result[strings.clone(key)] = strings.clone(string(str))
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
parse_expression_attribute_values :: proc(request_body: []byte) -> (map[string]Attribute_Value, bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return make(map[string]Attribute_Value), false
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, ok := data.(json.Object)
|
||||
if !ok {
|
||||
return make(map[string]Attribute_Value), false
|
||||
}
|
||||
|
||||
values_val, found := root["ExpressionAttributeValues"]
|
||||
if !found {
|
||||
return make(map[string]Attribute_Value), true
|
||||
}
|
||||
|
||||
values_obj, values_ok := values_val.(json.Object)
|
||||
if !values_ok {
|
||||
return make(map[string]Attribute_Value), false
|
||||
}
|
||||
|
||||
result := make(map[string]Attribute_Value)
|
||||
|
||||
for key, val in values_obj {
|
||||
attr, attr_ok := parse_attribute_value(val)
|
||||
if !attr_ok {
|
||||
// Clean up already-parsed values before returning error
|
||||
for k, &v in result {
|
||||
attr_value_destroy(&v)
|
||||
delete(k)
|
||||
}
|
||||
delete(result)
|
||||
return make(map[string]Attribute_Value), false
|
||||
}
|
||||
result[strings.clone(key)] = attr
|
||||
}
|
||||
|
||||
return result, true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// FIX: Use JSON object lookup instead of fragile string scanning.
|
||||
// This handles whitespace, field ordering, and escape sequences correctly.
|
||||
// ============================================================================
|
||||
parse_key_condition_expression_string :: proc(request_body: []byte) -> (expr: string, ok: bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
return
|
||||
}
|
||||
|
||||
kce_val, found := root["KeyConditionExpression"]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
kce_str, str_ok := kce_val.(json.String)
|
||||
if !str_ok {
|
||||
return
|
||||
}
|
||||
|
||||
expr = strings.clone(string(kce_str))
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
// Convenience: parse a complete Query key condition from request body
|
||||
parse_query_key_condition :: proc(request_body: []byte) -> (kc: Key_Condition, ok: bool) {
|
||||
expression := parse_key_condition_expression_string(request_body) or_return
|
||||
defer delete(expression)
|
||||
|
||||
attr_names := parse_expression_attribute_names(request_body)
|
||||
defer {
|
||||
if names, has_names := attr_names.?; has_names {
|
||||
for k, v in names {
|
||||
delete(k)
|
||||
delete(v)
|
||||
}
|
||||
names_copy := names
|
||||
delete(names_copy)
|
||||
}
|
||||
}
|
||||
|
||||
attr_values, vals_ok := parse_expression_attribute_values(request_body)
|
||||
if !vals_ok {
|
||||
return
|
||||
}
|
||||
defer {
|
||||
for k, v in attr_values {
|
||||
delete(k)
|
||||
v_copy := v
|
||||
attr_value_destroy(&v_copy)
|
||||
}
|
||||
delete(attr_values)
|
||||
}
|
||||
|
||||
kc, ok = parse_key_condition_expression(expression, attr_names, attr_values)
|
||||
return
|
||||
}
|
||||
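// ---------------------------------------------------------------------------
// Illustrative usage sketch (the request body below is made up):
//
//	body := transmute([]byte)string(`{
//	    "KeyConditionExpression": "pk = :pk AND sk > :after",
//	    "ExpressionAttributeValues": {":pk": {"S": "user#1"}, ":after": {"S": "2024"}}
//	}`)
//	kc, ok := parse_query_key_condition(body)
//	if ok {
//	    defer key_condition_destroy(&kc)
//	    // kc.pk_name == "pk"; kc.sk_condition holds {sk_name = "sk", operator = .GT}
//	}
// ---------------------------------------------------------------------------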
833
dynamodb/filter.odin
Normal file
@@ -0,0 +1,833 @@
|
||||
// FilterExpression and ProjectionExpression support
|
||||
// FilterExpression: post-retrieval filter applied to Scan/Query results
|
||||
// ProjectionExpression: return only specified attributes from items
|
||||
package dynamodb
|
||||
|
||||
import "core:encoding/json"
|
||||
import "core:strings"
|
||||
import "core:mem"
|
||||
|
||||
// ============================================================================
|
||||
// ProjectionExpression
|
||||
//
|
||||
// A comma-separated list of attribute names (with optional #name substitution)
|
||||
// that specifies which attributes to return.
|
||||
// ============================================================================
|
||||
|
||||
parse_projection_expression :: proc(
|
||||
request_body: []byte,
|
||||
attribute_names: Maybe(map[string]string),
|
||||
) -> (paths: []string, ok: bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return nil, false
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
pe_val, found := root["ProjectionExpression"]
|
||||
if !found {
|
||||
return nil, false // absent is not an error; the caller should check for presence separately
|
||||
}
|
||||
|
||||
pe_str, str_ok := pe_val.(json.String)
|
||||
if !str_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Split by comma and resolve names
|
||||
parts := strings.split(string(pe_str), ",")
|
||||
result := make([dynamic]string)
|
||||
|
||||
for part in parts {
|
||||
trimmed := strings.trim_space(part)
|
||||
if len(trimmed) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
resolved, res_ok := resolve_attribute_name(trimmed, attribute_names)
|
||||
if !res_ok {
|
||||
// Cleanup previously cloned strings
|
||||
for path in result {
|
||||
delete(path)
|
||||
}
|
||||
delete(result)
|
||||
return nil, false
|
||||
}
|
||||
append(&result, strings.clone(resolved)) // Clone for safe storage
|
||||
}
|
||||
|
||||
return result[:], true
|
||||
}
|
||||
|
||||
// Apply projection to a single item — returns a new item with only the specified attributes
|
||||
apply_projection :: proc(item: Item, projection: []string) -> Item {
|
||||
if len(projection) == 0 {
|
||||
// No projection — return a deep copy of the full item
|
||||
return item_deep_copy(item)
|
||||
}
|
||||
|
||||
projected := make(Item)
|
||||
for path in projection {
|
||||
if val, found := item[path]; found {
|
||||
projected[strings.clone(path)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
return projected
|
||||
}
|
||||
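// Example (illustrative; `full_item` stands for any previously decoded Item):
//
//	projection := []string{"pk", "email"}
//	trimmed := apply_projection(full_item, projection)   // deep-copies only "pk" and "email"
//	defer item_destroy(&trimmed)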
|
||||
// Deep copy an entire item
|
||||
item_deep_copy :: proc(item: Item) -> Item {
|
||||
result := make(Item)
|
||||
for key, val in item {
|
||||
result[strings.clone(key)] = attr_value_deep_copy(val)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// FilterExpression
|
||||
//
|
||||
// A condition expression applied post-retrieval. Supports:
|
||||
// - Comparisons: path = :val, path <> :val, path < :val, etc.
|
||||
// - BETWEEN: path BETWEEN :lo AND :hi
|
||||
// - IN: path IN (:v1, :v2, :v3)
|
||||
// - begins_with: begins_with(path, :prefix)
|
||||
// - contains: contains(path, :substr)
|
||||
// - attribute_exists(path)
|
||||
// - attribute_not_exists(path)
|
||||
// - AND / OR / NOT combinators
|
||||
//
|
||||
// This is a recursive-descent parser for condition expressions.
|
||||
// ============================================================================
|
||||
|
||||
// Parsed filter node (expression tree)
|
||||
Filter_Node_Type :: enum {
|
||||
Comparison, // path op value
|
||||
Between, // path BETWEEN lo AND hi
|
||||
In, // path IN (v1, v2, ...)
|
||||
Begins_With, // begins_with(path, value)
|
||||
Contains, // contains(path, value)
|
||||
Attribute_Exists, // attribute_exists(path)
|
||||
Attribute_Not_Exists, // attribute_not_exists(path)
|
||||
And, // left AND right
|
||||
Or, // left OR right
|
||||
Not, // NOT child
|
||||
}
|
||||
|
||||
Comparison_Op :: enum {
|
||||
EQ, // =
|
||||
NE, // <>
|
||||
LT, // <
|
||||
LE, // <=
|
||||
GT, // >
|
||||
GE, // >=
|
||||
}
|
||||
|
||||
Filter_Node :: struct {
|
||||
type: Filter_Node_Type,
|
||||
// For Comparison
|
||||
path: string,
|
||||
comp_op: Comparison_Op,
|
||||
value: Attribute_Value,
|
||||
// For Between
|
||||
value2: Maybe(Attribute_Value),
|
||||
// For In
|
||||
in_values: []Attribute_Value,
|
||||
// For And/Or
|
||||
left: ^Filter_Node,
|
||||
right: ^Filter_Node,
|
||||
// For Not
|
||||
child: ^Filter_Node,
|
||||
allocator: mem.Allocator, // allocator that created this node
|
||||
}
|
||||
|
||||
filter_node_destroy :: proc(node: ^Filter_Node) {
|
||||
if node == nil {
|
||||
return
|
||||
}
|
||||
|
||||
attr_value_destroy(&node.value)
|
||||
if v2, ok := node.value2.?; ok {
|
||||
v2_copy := v2
|
||||
attr_value_destroy(&v2_copy)
|
||||
}
|
||||
for &iv in node.in_values {
|
||||
attr_value_destroy(&iv)
|
||||
}
|
||||
if node.in_values != nil {
|
||||
delete(node.in_values)
|
||||
}
|
||||
|
||||
if node.left != nil {
|
||||
filter_node_destroy(node.left)
|
||||
}
|
||||
if node.right != nil {
|
||||
filter_node_destroy(node.right)
|
||||
}
|
||||
if node.child != nil {
|
||||
filter_node_destroy(node.child)
|
||||
}
|
||||
|
||||
// Free the node itself using the allocator that created it
|
||||
free(node, node.allocator)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Filter Expression Parser
|
||||
// ============================================================================
|
||||
|
||||
parse_filter_expression :: proc(
|
||||
expression: string,
|
||||
attribute_names: Maybe(map[string]string),
|
||||
attribute_values: map[string]Attribute_Value,
|
||||
) -> (node: ^Filter_Node, ok: bool) {
|
||||
t := tokenizer_init(expression)
|
||||
node, ok = parse_or_expr(&t, attribute_names, attribute_values)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Verify all tokens were consumed (no trailing garbage)
|
||||
if trailing := tokenizer_next(&t); trailing != nil {
|
||||
filter_node_destroy(node)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return node, true
|
||||
}
|
||||
|
||||
parse_or_expr :: proc(
|
||||
t: ^Tokenizer,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (^Filter_Node, bool) {
|
||||
left, left_ok := parse_and_expr(t, names, values)
|
||||
if !left_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
for {
|
||||
saved_pos := t.pos
|
||||
tok_maybe := tokenizer_next(t)
|
||||
tok, has := tok_maybe.?
|
||||
if !has {
|
||||
break
|
||||
}
|
||||
|
||||
if strings.equal_fold(tok, "OR") {
|
||||
right, right_ok := parse_and_expr(t, names, values)
|
||||
if !right_ok {
|
||||
filter_node_destroy(left)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
parent := make_filter_node()
|
||||
parent.type = .Or
|
||||
parent.left = left
|
||||
parent.right = right
|
||||
left = parent
|
||||
} else {
|
||||
t.pos = saved_pos
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return left, true
|
||||
}
|
||||
|
||||
parse_and_expr :: proc(
|
||||
t: ^Tokenizer,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (^Filter_Node, bool) {
|
||||
left, left_ok := parse_not_expr(t, names, values)
|
||||
if !left_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
for {
|
||||
saved_pos := t.pos
|
||||
tok_maybe := tokenizer_next(t)
|
||||
tok, has := tok_maybe.?
|
||||
if !has {
|
||||
break
|
||||
}
|
||||
|
||||
if strings.equal_fold(tok, "AND") {
|
||||
right, right_ok := parse_not_expr(t, names, values)
|
||||
if !right_ok {
|
||||
filter_node_destroy(left)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
parent := make_filter_node()
|
||||
parent.type = .And
|
||||
parent.left = left
|
||||
parent.right = right
|
||||
left = parent
|
||||
} else {
|
||||
t.pos = saved_pos
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return left, true
|
||||
}
|
||||
|
||||
parse_not_expr :: proc(
|
||||
t: ^Tokenizer,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (^Filter_Node, bool) {
|
||||
saved_pos := t.pos
|
||||
tok_maybe := tokenizer_next(t)
|
||||
tok, has := tok_maybe.?
|
||||
if !has {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if strings.equal_fold(tok, "NOT") {
|
||||
child, child_ok := parse_primary_expr(t, names, values)
|
||||
if !child_ok {
|
||||
return nil, false
|
||||
}
|
||||
node := make_filter_node()
|
||||
node.type = .Not
|
||||
node.child = child
|
||||
return node, true
|
||||
}
|
||||
|
||||
t.pos = saved_pos
|
||||
return parse_primary_expr(t, names, values)
|
||||
}
|
||||
|
||||
parse_primary_expr :: proc(
|
||||
t: ^Tokenizer,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (^Filter_Node, bool) {
|
||||
first_tok, first_ok := next_token(t)
|
||||
if !first_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Parenthesized expression
|
||||
if first_tok == "(" {
|
||||
inner, inner_ok := parse_or_expr(t, names, values)
|
||||
if !inner_ok {
|
||||
return nil, false
|
||||
}
|
||||
rparen, rp_ok := next_token(t)
|
||||
if !rp_ok || rparen != ")" {
|
||||
filter_node_destroy(inner)
|
||||
return nil, false
|
||||
}
|
||||
return inner, true
|
||||
}
|
||||
|
||||
// Function-style: begins_with, contains, attribute_exists, attribute_not_exists
|
||||
if strings.equal_fold(first_tok, "begins_with") {
|
||||
return parse_filter_begins_with(t, names, values)
|
||||
}
|
||||
if strings.equal_fold(first_tok, "contains") {
|
||||
return parse_filter_contains(t, names, values)
|
||||
}
|
||||
if strings.equal_fold(first_tok, "attribute_exists") {
|
||||
return parse_filter_attr_exists(t, names, true)
|
||||
}
|
||||
if strings.equal_fold(first_tok, "attribute_not_exists") {
|
||||
return parse_filter_attr_exists(t, names, false)
|
||||
}
|
||||
|
||||
// Comparison, BETWEEN, or IN: path op value
|
||||
path, path_ok := resolve_attribute_name(first_tok, names)
|
||||
if !path_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
op_tok, op_ok := next_token(t)
|
||||
if !op_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// BETWEEN
|
||||
if strings.equal_fold(op_tok, "BETWEEN") {
|
||||
return parse_filter_between(t, path, names, values)
|
||||
}
|
||||
|
||||
// IN
|
||||
if strings.equal_fold(op_tok, "IN") {
|
||||
return parse_filter_in(t, path, names, values)
|
||||
}
|
||||
|
||||
// Comparison operators
|
||||
comp_op: Comparison_Op
|
||||
if op_tok == "=" {
|
||||
comp_op = .EQ
|
||||
} else if op_tok == "<>" {
|
||||
comp_op = .NE
|
||||
} else if op_tok == "<" {
|
||||
comp_op = .LT
|
||||
} else if op_tok == "<=" {
|
||||
comp_op = .LE
|
||||
} else if op_tok == ">" {
|
||||
comp_op = .GT
|
||||
} else if op_tok == ">=" {
|
||||
comp_op = .GE
|
||||
} else {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
val_tok, vt_ok := next_token(t)
|
||||
if !vt_ok {
|
||||
return nil, false
|
||||
}
|
||||
val, val_ok := resolve_attribute_value(val_tok, values)
|
||||
if !val_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
node := make_filter_node()
|
||||
node.type = .Comparison
|
||||
node.path = path
|
||||
node.comp_op = comp_op
|
||||
node.value = val
|
||||
return node, true
|
||||
}
|
||||
|
||||
parse_filter_begins_with :: proc(
|
||||
t: ^Tokenizer,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (^Filter_Node, bool) {
|
||||
lparen, lp_ok := next_token(t)
|
||||
if !lp_ok || lparen != "(" {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
path_tok, path_ok := next_token(t)
|
||||
if !path_ok {
|
||||
return nil, false
|
||||
}
|
||||
path, path_resolved := resolve_attribute_name(path_tok, names)
|
||||
if !path_resolved {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
comma, comma_ok := next_token(t)
|
||||
if !comma_ok || comma != "," {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
val_tok, vt_ok := next_token(t)
|
||||
if !vt_ok {
|
||||
return nil, false
|
||||
}
|
||||
val, val_ok := resolve_attribute_value(val_tok, values)
|
||||
if !val_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
rparen, rp_ok := next_token(t)
|
||||
if !rp_ok || rparen != ")" {
|
||||
attr_value_destroy(&val)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
node := make_filter_node()
|
||||
node.type = .Begins_With
|
||||
node.path = path
|
||||
node.value = val
|
||||
return node, true
|
||||
}
|
||||
|
||||
parse_filter_contains :: proc(
|
||||
t: ^Tokenizer,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (^Filter_Node, bool) {
|
||||
lparen, lp_ok := next_token(t)
|
||||
if !lp_ok || lparen != "(" {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
path_tok, path_ok := next_token(t)
|
||||
if !path_ok {
|
||||
return nil, false
|
||||
}
|
||||
path, path_resolved := resolve_attribute_name(path_tok, names)
|
||||
if !path_resolved {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
comma, comma_ok := next_token(t)
|
||||
if !comma_ok || comma != "," {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
val_tok, vt_ok := next_token(t)
|
||||
if !vt_ok {
|
||||
return nil, false
|
||||
}
|
||||
val, val_ok := resolve_attribute_value(val_tok, values)
|
||||
if !val_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
rparen, rp_ok := next_token(t)
|
||||
if !rp_ok || rparen != ")" {
|
||||
attr_value_destroy(&val)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
node := make_filter_node()
|
||||
node.type = .Contains
|
||||
node.path = path
|
||||
node.value = val
|
||||
return node, true
|
||||
}
|
||||
|
||||
parse_filter_attr_exists :: proc(
|
||||
t: ^Tokenizer,
|
||||
names: Maybe(map[string]string),
|
||||
exists: bool,
|
||||
) -> (^Filter_Node, bool) {
|
||||
lparen, lp_ok := next_token(t)
|
||||
if !lp_ok || lparen != "(" {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
path_tok, path_ok := next_token(t)
|
||||
if !path_ok {
|
||||
return nil, false
|
||||
}
|
||||
path, path_resolved := resolve_attribute_name(path_tok, names)
|
||||
if !path_resolved {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
rparen, rp_ok := next_token(t)
|
||||
if !rp_ok || rparen != ")" {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
node := make_filter_node()
|
||||
node.type = .Attribute_Exists if exists else .Attribute_Not_Exists
|
||||
node.path = path
|
||||
return node, true
|
||||
}
|
||||
|
||||
parse_filter_between :: proc(
|
||||
t: ^Tokenizer,
|
||||
path: string,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (^Filter_Node, bool) {
|
||||
lo_tok, lo_ok := next_token(t)
|
||||
if !lo_ok {
|
||||
return nil, false
|
||||
}
|
||||
lo_val, lo_val_ok := resolve_attribute_value(lo_tok, values)
|
||||
if !lo_val_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
and_tok, and_ok := next_token(t)
|
||||
if !and_ok || !strings.equal_fold(and_tok, "AND") {
|
||||
attr_value_destroy(&lo_val)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
hi_tok, hi_ok := next_token(t)
|
||||
if !hi_ok {
|
||||
attr_value_destroy(&lo_val)
|
||||
return nil, false
|
||||
}
|
||||
hi_val, hi_val_ok := resolve_attribute_value(hi_tok, values)
|
||||
if !hi_val_ok {
|
||||
attr_value_destroy(&lo_val)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
node := make_filter_node()
|
||||
node.type = .Between
|
||||
node.path = path
|
||||
node.value = lo_val
|
||||
node.value2 = hi_val
|
||||
return node, true
|
||||
}
|
||||
|
||||
parse_filter_in :: proc(
|
||||
t: ^Tokenizer,
|
||||
path: string,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (^Filter_Node, bool) {
|
||||
lparen, lp_ok := next_token(t)
|
||||
if !lp_ok || lparen != "(" {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
in_vals := make([dynamic]Attribute_Value)
|
||||
|
||||
for {
|
||||
val_tok, vt_ok := next_token(t)
|
||||
if !vt_ok {
|
||||
for &v in in_vals {
|
||||
attr_value_destroy(&v)
|
||||
}
|
||||
delete(in_vals)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
val, val_ok := resolve_attribute_value(val_tok, values)
|
||||
if !val_ok {
|
||||
for &v in in_vals {
|
||||
attr_value_destroy(&v)
|
||||
}
|
||||
delete(in_vals)
|
||||
return nil, false
|
||||
}
|
||||
append(&in_vals, val)
|
||||
|
||||
sep_tok, sep_ok := next_token(t)
|
||||
if !sep_ok {
|
||||
for &v in in_vals {
|
||||
attr_value_destroy(&v)
|
||||
}
|
||||
delete(in_vals)
|
||||
return nil, false
|
||||
}
|
||||
if sep_tok == ")" {
|
||||
break
|
||||
}
|
||||
if sep_tok != "," {
|
||||
for &v in in_vals {
|
||||
attr_value_destroy(&v)
|
||||
}
|
||||
delete(in_vals)
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
node := make_filter_node()
|
||||
node.type = .In
|
||||
node.path = path
|
||||
node.in_values = in_vals[:]
|
||||
return node, true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Filter Expression Evaluation
|
||||
// ============================================================================
|
||||
|
||||
evaluate_filter :: proc(item: Item, node: ^Filter_Node) -> bool {
|
||||
if node == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
switch node.type {
|
||||
case .Comparison:
|
||||
attr, found := item[node.path]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
return evaluate_comparison(attr, node.comp_op, node.value)
|
||||
|
||||
case .Between:
|
||||
attr, found := item[node.path]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
lo_cmp := compare_attribute_values(attr, node.value)
|
||||
if v2, ok := node.value2.?; ok {
|
||||
hi_cmp := compare_attribute_values(attr, v2)
|
||||
return lo_cmp >= 0 && hi_cmp <= 0
|
||||
}
|
||||
return false
|
||||
|
||||
case .In:
|
||||
attr, found := item[node.path]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
for in_val in node.in_values {
|
||||
if compare_attribute_values(attr, in_val) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
|
||||
case .Begins_With:
|
||||
attr, found := item[node.path]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
attr_str, attr_ok := attr_value_to_string_for_compare(attr)
|
||||
val_str, val_ok := attr_value_to_string_for_compare(node.value)
|
||||
if !attr_ok || !val_ok {
|
||||
return false
|
||||
}
|
||||
return strings.has_prefix(attr_str, val_str)
|
||||
|
||||
case .Contains:
|
||||
attr, found := item[node.path]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
return evaluate_contains(attr, node.value)
|
||||
|
||||
case .Attribute_Exists:
|
||||
_, found := item[node.path]
|
||||
return found
|
||||
|
||||
case .Attribute_Not_Exists:
|
||||
_, found := item[node.path]
|
||||
return !found
|
||||
|
||||
case .And:
|
||||
return evaluate_filter(item, node.left) && evaluate_filter(item, node.right)
|
||||
|
||||
case .Or:
|
||||
return evaluate_filter(item, node.left) || evaluate_filter(item, node.right)
|
||||
|
||||
case .Not:
|
||||
return !evaluate_filter(item, node.child)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
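// ---------------------------------------------------------------------------
// Illustrative sketch of the full filter pipeline (the expression and items
// are made up; attr_values would normally come from the parsed request body
// and must contain ":min" for this example to resolve):
//
//	node, ok := parse_filter_expression("age >= :min AND attribute_exists(email)", nil, attr_values)
//	if ok {
//	    defer filter_node_destroy(node)
//	    for item in scanned_items {
//	        if evaluate_filter(item, node) {
//	            // keep item in the response
//	        }
//	    }
//	}
// ---------------------------------------------------------------------------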
|
||||
evaluate_comparison :: proc(attr: Attribute_Value, op: Comparison_Op, val: Attribute_Value) -> bool {
|
||||
cmp := compare_attribute_values(attr, val)
|
||||
|
||||
// -2 means types are incomparable - all comparisons return false
|
||||
// (matches DynamoDB behavior: mixed-type comparisons always fail)
|
||||
if cmp == -2 {
|
||||
return false
|
||||
}
|
||||
|
||||
switch op {
|
||||
case .EQ: return cmp == 0
|
||||
case .NE: return cmp != 0
|
||||
case .LT: return cmp < 0
|
||||
case .LE: return cmp <= 0
|
||||
case .GT: return cmp > 0
|
||||
case .GE: return cmp >= 0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
evaluate_contains :: proc(attr: Attribute_Value, val: Attribute_Value) -> bool {
|
||||
// For strings: substring check
|
||||
#partial switch a in attr {
|
||||
case String:
|
||||
if v, ok := val.(String); ok {
|
||||
return strings.contains(string(a), string(v))
|
||||
}
|
||||
|
||||
case String_Set:
|
||||
if v, ok := val.(String); ok {
|
||||
for s in a {
|
||||
if s == string(v) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case DDB_Number_Set:
|
||||
if v, ok := val.(DDB_Number); ok {
|
||||
for num in a {
|
||||
if compare_ddb_numbers(num, v) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case List:
|
||||
for item in a {
|
||||
if compare_attribute_values(item, val) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Compare two AttributeValues. Returns <0, 0, or >0.
|
||||
// For mixed types, returns -2 (not comparable).
|
||||
compare_attribute_values :: proc(a: Attribute_Value, b: Attribute_Value) -> int {
|
||||
a_str, a_ok := attr_value_to_string_for_compare(a)
|
||||
b_str, b_ok := attr_value_to_string_for_compare(b)
|
||||
|
||||
if !a_ok || !b_ok {
|
||||
// Try bool comparison
|
||||
a_bool, a_is_bool := a.(Bool)
|
||||
b_bool, b_is_bool := b.(Bool)
|
||||
if a_is_bool && b_is_bool {
|
||||
if bool(a_bool) == bool(b_bool) {
|
||||
return 0
|
||||
}
|
||||
return -2
|
||||
}
|
||||
return -2
|
||||
}
|
||||
|
||||
// For numbers, compare numerically via compare_ddb_numbers instead of lexicographically
|
||||
_, a_is_num := a.(DDB_Number)
|
||||
_, b_is_num := b.(DDB_Number)
|
||||
if a_is_num && b_is_num {
|
||||
a_num := a.(DDB_Number)
|
||||
b_num := b.(DDB_Number)
|
||||
return compare_ddb_numbers(a_num, b_num)
|
||||
}
|
||||
|
||||
return strings.compare(a_str, b_str)
|
||||
}
|
||||
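// Behaviour notes (illustrative, based on the branches in the proc above):
//   - two String values fall through to strings.compare (lexicographic)
//   - two DDB_Number values are compared numerically via compare_ddb_numbers
//   - a Bool compares only against another Bool (equal → 0, otherwise -2)
//   - any pairing that cannot be stringified for comparison yields -2, which
//     evaluate_comparison treats as "condition is false"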
|
||||
// ============================================================================
|
||||
// Request parsing helpers for FilterExpression
|
||||
// ============================================================================
|
||||
|
||||
parse_filter_expression_string :: proc(request_body: []byte) -> (expr: string, ok: bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
return
|
||||
}
|
||||
|
||||
fe_val, found := root["FilterExpression"]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
fe_str, str_ok := fe_val.(json.String)
|
||||
if !str_ok {
|
||||
return
|
||||
}
|
||||
|
||||
expr = strings.clone(string(fe_str))
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Allocator Helper
|
||||
// ============================================================================
|
||||
|
||||
make_filter_node :: proc() -> ^Filter_Node {
|
||||
node := new(Filter_Node)
|
||||
node.allocator = context.allocator
|
||||
return node
|
||||
}
|
||||
510
dynamodb/gsi.odin
Normal file
@@ -0,0 +1,510 @@
|
||||
// Global Secondary Index (GSI) support
|
||||
//
|
||||
// DynamoDB GSI semantics:
|
||||
// - GSI entries are maintained automatically on every write (put/delete/update)
|
||||
// - Each GSI has its own key schema (partition key + optional sort key)
|
||||
// - GSI keys are built from item attributes; if an item doesn't have the GSI
|
||||
// key attribute(s), NO GSI entry is written (sparse index)
|
||||
// - Projection controls which non-key attributes are stored in the GSI entry:
|
||||
// ALL → entire item is copied
|
||||
// KEYS_ONLY → only table PK/SK + GSI PK/SK
|
||||
// INCLUDE → table keys + GSI keys + specified non-key attributes
|
||||
// - Query on a GSI uses IndexName to route to the correct key prefix
|
||||
//
|
||||
// Storage layout:
|
||||
// GSI key: [0x03][table_name][index_name][gsi_pk_value][gsi_sk_value?]
|
||||
// GSI value: TLV-encoded projected item (same binary format as regular items)
|
||||
//
|
||||
// Write path:
|
||||
// put_item → for each GSI, extract GSI key attrs from the NEW item, write GSI entry
|
||||
// delete → for each GSI, extract GSI key attrs from the OLD item, delete GSI entry
|
||||
// update → delete OLD GSI entries, write NEW GSI entries
|
||||
//
|
||||
// ATOMICITY: All GSI operations use WriteBatch to ensure that GSI entries are
|
||||
// maintained atomically with the base item write/delete.
|
||||
//
|
||||
package dynamodb
|
||||
|
||||
import "core:slice"
|
||||
import "core:strings"
|
||||
import "../rocksdb"
|
||||
|
||||
// ============================================================================
|
||||
// GSI Key Extraction
|
||||
//
|
||||
// Extracts the GSI partition key (and optional sort key) raw bytes from an item.
|
||||
// Returns false if the item doesn't have the required GSI PK attribute (sparse).
|
||||
// ============================================================================
|
||||
|
||||
GSI_Key_Values :: struct {
|
||||
pk: []byte,
|
||||
sk: Maybe([]byte),
|
||||
}
|
||||
|
||||
// Extract GSI key values from an item based on the GSI's key schema.
|
||||
// Returns ok=false if ANY required key attribute is missing (sparse index).
|
||||
// DynamoDB sparse index semantics: item must have ALL key attributes defined in the GSI schema.
|
||||
gsi_extract_key_values :: proc(item: Item, gsi_key_schema: []Key_Schema_Element) -> (GSI_Key_Values, bool) {
|
||||
result: GSI_Key_Values
|
||||
|
||||
for ks in gsi_key_schema {
|
||||
attr, found := item[ks.attribute_name]
|
||||
if !found {
|
||||
// Any key attribute missing → sparse index, skip this item
|
||||
return {}, false
|
||||
}
|
||||
|
||||
raw, raw_ok := attr_value_to_bytes(attr)
|
||||
if !raw_ok {
|
||||
// Can't convert attribute to bytes → skip this item
|
||||
return {}, false
|
||||
}
|
||||
|
||||
switch ks.key_type {
|
||||
case .HASH:
|
||||
result.pk = raw
|
||||
case .RANGE:
|
||||
result.sk = raw
|
||||
}
|
||||
}
|
||||
|
||||
return result, true
|
||||
}
|
||||
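// Example (illustrative) — sparse index behaviour:
//
//	// For a GSI keyed on "email" (HASH), an item that has no "email"
//	// attribute produces no GSI entry at all:
//	kv, ok := gsi_extract_key_values(item_without_email, gsi.key_schema)
//	// ok == false → the write path simply skips this index for that item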
|
||||
// Convert a scalar attribute value to its raw byte representation (borrowed).
|
||||
attr_value_to_bytes :: proc(attr: Attribute_Value) -> ([]byte, bool) {
|
||||
#partial switch v in attr {
|
||||
case String:
|
||||
return transmute([]byte)string(v), true
|
||||
case DDB_Number:
|
||||
return encode_ddb_number_for_sort(v), true
|
||||
case Binary:
|
||||
return transmute([]byte)string(v), true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// GSI Projection
|
||||
//
|
||||
// Build a projected copy of an item for storage in a GSI entry.
|
||||
// ============================================================================
|
||||
|
||||
// Build the projected item for a GSI entry.
|
||||
// The result is a new Item that the caller owns.
|
||||
gsi_project_item :: proc(
|
||||
item: Item,
|
||||
gsi: ^Global_Secondary_Index,
|
||||
table_key_schema: []Key_Schema_Element,
|
||||
) -> Item {
|
||||
switch gsi.projection.projection_type {
|
||||
case .ALL:
|
||||
return item_deep_copy(item)
|
||||
|
||||
case .KEYS_ONLY:
|
||||
projected := make(Item)
|
||||
// Include table key attributes
|
||||
for ks in table_key_schema {
|
||||
if val, found := item[ks.attribute_name]; found {
|
||||
projected[strings.clone(ks.attribute_name)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
// Include GSI key attributes
|
||||
for ks in gsi.key_schema {
|
||||
if _, already := projected[ks.attribute_name]; already {
|
||||
continue // Already included as table key
|
||||
}
|
||||
if val, found := item[ks.attribute_name]; found {
|
||||
projected[strings.clone(ks.attribute_name)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
return projected
|
||||
|
||||
case .INCLUDE:
|
||||
projected := make(Item)
|
||||
// Include table key attributes
|
||||
for ks in table_key_schema {
|
||||
if val, found := item[ks.attribute_name]; found {
|
||||
projected[strings.clone(ks.attribute_name)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
// Include GSI key attributes
|
||||
for ks in gsi.key_schema {
|
||||
if _, already := projected[ks.attribute_name]; already {
|
||||
continue
|
||||
}
|
||||
if val, found := item[ks.attribute_name]; found {
|
||||
projected[strings.clone(ks.attribute_name)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
// Include specified non-key attributes
|
||||
if nka, has_nka := gsi.projection.non_key_attributes.?; has_nka {
|
||||
for attr_name in nka {
|
||||
if _, already := projected[attr_name]; already {
|
||||
continue
|
||||
}
|
||||
if val, found := item[attr_name]; found {
|
||||
projected[strings.clone(attr_name)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
return projected
|
||||
}
|
||||
|
||||
// Fallback: treat any unrecognized projection type as ALL
|
||||
return item_deep_copy(item)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// GSI Write Maintenance - ATOMIC via WriteBatch
|
||||
//
|
||||
// These procedures add GSI operations to a WriteBatch instead of performing
|
||||
// direct database writes. This ensures atomicity with the base item operation.
|
||||
// ============================================================================
|
||||
|
||||
// Add GSI write operations to a WriteBatch for an item across all GSIs.
|
||||
// Called during put_item or update_item to maintain NEW GSI entries.
|
||||
gsi_batch_write_entries :: proc(
|
||||
batch: ^rocksdb.WriteBatch,
|
||||
table_name: string,
|
||||
item: Item,
|
||||
metadata: ^Table_Metadata,
|
||||
) -> Storage_Error {
|
||||
gsis, has_gsis := metadata.global_secondary_indexes.?
|
||||
if !has_gsis || len(gsis) == 0 {
|
||||
return .None
|
||||
}
|
||||
|
||||
base_key, base_ok := key_from_item(item, metadata.key_schema)
|
||||
if !base_ok {
|
||||
return .Missing_Key_Attribute
|
||||
}
|
||||
defer key_destroy(&base_key)
|
||||
|
||||
base_vals, base_vals_ok := key_get_values(&base_key)
|
||||
if !base_vals_ok {
|
||||
return .Invalid_Key
|
||||
}
|
||||
|
||||
for &gsi in gsis {
|
||||
// Extract GSI key from item
|
||||
gsi_kv, kv_ok := gsi_extract_key_values(item, gsi.key_schema)
|
||||
if !kv_ok do continue // item doesn't have GSI PK, skip
|
||||
|
||||
// Build GSI storage key
|
||||
gsi_storage_key := build_gsi_key(
|
||||
table_name,
|
||||
gsi.index_name,
|
||||
gsi_kv.pk,
|
||||
gsi_kv.sk,
|
||||
base_vals.pk,
|
||||
base_vals.sk,
|
||||
)
|
||||
defer delete(gsi_storage_key)
|
||||
|
||||
// Build projected item
|
||||
projected := gsi_project_item(item, &gsi, metadata.key_schema)
|
||||
defer item_destroy(&projected)
|
||||
|
||||
// Encode projected item
|
||||
encoded, encode_ok := encode(projected)
|
||||
if !encode_ok {
|
||||
return .Serialization_Error
|
||||
}
|
||||
defer delete(encoded)
|
||||
|
||||
// Add to batch (not written yet)
|
||||
rocksdb.batch_put(batch, gsi_storage_key, encoded)
|
||||
}
|
||||
|
||||
return .None
|
||||
}
|
||||
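// ---------------------------------------------------------------------------
// Illustrative write-path sketch (the surrounding put_item code is not shown
// here; only the batch_put/batch_delete calls are real APIs from ../rocksdb,
// the variable names are hypothetical):
//
//	// inside put_item, with batch: ^rocksdb.WriteBatch already created for this write:
//	rocksdb.batch_put(batch, base_key_bytes, encoded_item)
//	gsi_batch_delete_entries(batch, table_name, old_item, metadata) // drop stale GSI rows
//	gsi_batch_write_entries(batch, table_name, new_item, metadata)  // write fresh GSI rows
//	// committing the batch applies the base write and GSI maintenance atomically
// ---------------------------------------------------------------------------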
|
||||
// Add GSI delete operations to a WriteBatch for an item across all GSIs.
|
||||
// Called during delete_item or update_item to remove OLD GSI entries.
|
||||
// Needs the OLD item to know which GSI keys to remove.
|
||||
gsi_batch_delete_entries :: proc(
|
||||
batch: ^rocksdb.WriteBatch,
|
||||
table_name: string,
|
||||
old_item: Item,
|
||||
metadata: ^Table_Metadata,
|
||||
) -> Storage_Error {
|
||||
gsis, has_gsis := metadata.global_secondary_indexes.?
|
||||
if !has_gsis || len(gsis) == 0 {
|
||||
return .None
|
||||
}
|
||||
|
||||
base_key, base_ok := key_from_item(old_item, metadata.key_schema)
|
||||
if !base_ok {
|
||||
return .Missing_Key_Attribute
|
||||
}
|
||||
defer key_destroy(&base_key)
|
||||
|
||||
base_vals, base_vals_ok := key_get_values(&base_key)
|
||||
if !base_vals_ok {
|
||||
return .Invalid_Key
|
||||
}
|
||||
|
||||
for &gsi in gsis {
|
||||
// Extract GSI key from item
|
||||
gsi_kv, kv_ok := gsi_extract_key_values(old_item, gsi.key_schema)
|
||||
if !kv_ok do continue // old item doesn't have GSI PK, skip
|
||||
|
||||
// Build GSI storage key
|
||||
gsi_storage_key := build_gsi_key(
|
||||
table_name,
|
||||
gsi.index_name,
|
||||
gsi_kv.pk,
|
||||
gsi_kv.sk,
|
||||
base_vals.pk,
|
||||
base_vals.sk,
|
||||
)
|
||||
defer delete(gsi_storage_key)
|
||||
|
||||
// Add to batch (not written yet)
|
||||
rocksdb.batch_delete(batch, gsi_storage_key)
|
||||
}
|
||||
|
||||
return .None
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// GSI Query
|
||||
//
|
||||
// Queries a GSI by partition key with optional sort key condition.
|
||||
// Mirrors the main table query() but uses GSI key prefix.
|
||||
// ============================================================================
|
||||
|
||||
gsi_query :: proc(
|
||||
engine: ^Storage_Engine,
|
||||
table_name: string,
|
||||
index_name: string,
|
||||
partition_key_value: []byte,
|
||||
exclusive_start_key: Maybe([]byte),
|
||||
limit: int,
|
||||
sk_condition: Maybe(Sort_Key_Condition) = nil,
|
||||
) -> (Query_Result, Storage_Error) {
|
||||
// Build GSI partition prefix
|
||||
prefix := build_gsi_partition_prefix(table_name, index_name, partition_key_value)
|
||||
defer delete(prefix)
|
||||
|
||||
iter, iter_err := rocksdb.iter_create(&engine.db)
|
||||
if iter_err != .None {
|
||||
return {}, .RocksDB_Error
|
||||
}
|
||||
defer rocksdb.iter_destroy(&iter)
|
||||
|
||||
max_items := limit if limit > 0 else 1_000_000
|
||||
|
||||
// Seek to start position
|
||||
if start_key, has_start := exclusive_start_key.?; has_start {
|
||||
if has_prefix(start_key, prefix) {
|
||||
rocksdb.iter_seek(&iter, start_key)
|
||||
if rocksdb.iter_valid(&iter) {
|
||||
rocksdb.iter_next(&iter)
|
||||
}
|
||||
} else {
|
||||
rocksdb.iter_seek(&iter, prefix)
|
||||
}
|
||||
} else {
|
||||
rocksdb.iter_seek(&iter, prefix)
|
||||
}
|
||||
|
||||
items := make([dynamic]Item)
|
||||
count := 0
|
||||
last_key: Maybe([]byte) = nil
|
||||
has_more := false
|
||||
|
||||
for rocksdb.iter_valid(&iter) {
|
||||
key := rocksdb.iter_key(&iter)
|
||||
if key == nil || !has_prefix(key, prefix) {
|
||||
break
|
||||
}
|
||||
|
||||
if count >= max_items {
|
||||
has_more = true
|
||||
break
|
||||
}
|
||||
|
||||
value := rocksdb.iter_value(&iter)
|
||||
if value == nil {
|
||||
rocksdb.iter_next(&iter)
|
||||
continue
|
||||
}
|
||||
|
||||
item, decode_ok := decode(value)
|
||||
if !decode_ok {
|
||||
rocksdb.iter_next(&iter)
|
||||
continue
|
||||
}
|
||||
|
||||
// Sort key condition filtering
|
||||
if skc, has_skc := sk_condition.?; has_skc {
|
||||
if !evaluate_sort_key_condition(item, &skc) {
|
||||
item_copy := item
|
||||
item_destroy(&item_copy)
|
||||
rocksdb.iter_next(&iter)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
append(&items, item)
|
||||
count += 1
|
||||
|
||||
// Track key of last returned item
|
||||
if prev_key, had_prev := last_key.?; had_prev {
|
||||
delete(prev_key)
|
||||
}
|
||||
last_key = slice.clone(key)
|
||||
|
||||
rocksdb.iter_next(&iter)
|
||||
}
|
||||
|
||||
// Only emit LastEvaluatedKey if there are more items
|
||||
if !has_more {
|
||||
if lk, had_lk := last_key.?; had_lk {
|
||||
delete(lk)
|
||||
}
|
||||
last_key = nil
|
||||
}
|
||||
|
||||
result_items := make([]Item, len(items))
|
||||
copy(result_items, items[:])
|
||||
|
||||
return Query_Result{
|
||||
items = result_items,
|
||||
last_evaluated_key = last_key,
|
||||
}, .None
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// GSI Scan
|
||||
//
|
||||
// Scans all entries in a GSI (all partition keys under that index).
|
||||
// ============================================================================
|
||||
|
||||
gsi_scan :: proc(
|
||||
engine: ^Storage_Engine,
|
||||
table_name: string,
|
||||
index_name: string,
|
||||
exclusive_start_key: Maybe([]byte),
|
||||
limit: int,
|
||||
) -> (Scan_Result, Storage_Error) {
|
||||
prefix := build_gsi_prefix(table_name, index_name)
|
||||
defer delete(prefix)
|
||||
|
||||
iter, iter_err := rocksdb.iter_create(&engine.db)
|
||||
if iter_err != .None {
|
||||
return {}, .RocksDB_Error
|
||||
}
|
||||
defer rocksdb.iter_destroy(&iter)
|
||||
|
||||
max_items := limit if limit > 0 else 1_000_000
|
||||
|
||||
if start_key, has_start := exclusive_start_key.?; has_start {
|
||||
if has_prefix(start_key, prefix) {
|
||||
rocksdb.iter_seek(&iter, start_key)
|
||||
if rocksdb.iter_valid(&iter) {
|
||||
rocksdb.iter_next(&iter)
|
||||
}
|
||||
} else {
|
||||
rocksdb.iter_seek(&iter, prefix)
|
||||
}
|
||||
} else {
|
||||
rocksdb.iter_seek(&iter, prefix)
|
||||
}
|
||||
|
||||
items := make([dynamic]Item)
|
||||
count := 0
|
||||
last_key: Maybe([]byte) = nil
|
||||
has_more := false
|
||||
|
||||
for rocksdb.iter_valid(&iter) {
|
||||
key := rocksdb.iter_key(&iter)
|
||||
if key == nil || !has_prefix(key, prefix) {
|
||||
break
|
||||
}
|
||||
|
||||
if count >= max_items {
|
||||
has_more = true
|
||||
break
|
||||
}
|
||||
|
||||
value := rocksdb.iter_value(&iter)
|
||||
if value == nil {
|
||||
rocksdb.iter_next(&iter)
|
||||
continue
|
||||
}
|
||||
|
||||
item, decode_ok := decode(value)
|
||||
if !decode_ok {
|
||||
rocksdb.iter_next(&iter)
|
||||
continue
|
||||
}
|
||||
|
||||
append(&items, item)
|
||||
count += 1
|
||||
|
||||
if prev_key, had_prev := last_key.?; had_prev {
|
||||
delete(prev_key)
|
||||
}
|
||||
last_key = slice.clone(key)
|
||||
|
||||
rocksdb.iter_next(&iter)
|
||||
}
|
||||
|
||||
if !has_more {
|
||||
if lk, had_lk := last_key.?; had_lk {
|
||||
delete(lk)
|
||||
}
|
||||
last_key = nil
|
||||
}
|
||||
|
||||
result_items := make([]Item, len(items))
|
||||
copy(result_items, items[:])
|
||||
|
||||
return Scan_Result{
|
||||
items = result_items,
|
||||
last_evaluated_key = last_key,
|
||||
}, .None
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// GSI Metadata Lookup Helpers
|
||||
// ============================================================================
|
||||
|
||||
// Find a GSI definition by index name in the table metadata.
|
||||
find_gsi :: proc(metadata: ^Table_Metadata, index_name: string) -> (^Global_Secondary_Index, bool) {
|
||||
gsis, has_gsis := metadata.global_secondary_indexes.?
|
||||
if !has_gsis {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
for &gsi in gsis {
|
||||
if gsi.index_name == index_name {
|
||||
return &gsi, true
|
||||
}
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Get the GSI's sort key attribute name (if any).
|
||||
gsi_get_sort_key_name :: proc(gsi: ^Global_Secondary_Index) -> Maybe(string) {
|
||||
for ks in gsi.key_schema {
|
||||
if ks.key_type == .RANGE {
|
||||
return ks.attribute_name
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the GSI's partition key attribute name.
|
||||
gsi_get_partition_key_name :: proc(gsi: ^Global_Secondary_Index) -> Maybe(string) {
|
||||
for ks in gsi.key_schema {
|
||||
if ks.key_type == .HASH {
|
||||
return ks.attribute_name
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
187
dynamodb/gsi_metadata.odin
Normal file
@@ -0,0 +1,187 @@
|
||||
// gsi_metadata.odin — GSI metadata parsing for serialize/deserialize_table_metadata
|
||||
//
|
||||
// Parses GSI definitions from the embedded JSON string stored in table metadata.
|
||||
// This file lives in the dynamodb/ package.
|
||||
package dynamodb
|
||||
|
||||
import "core:encoding/json"
|
||||
import "core:mem"
|
||||
import "core:strings"
|
||||
|
||||
// Parse GlobalSecondaryIndexes from a JSON string like:
|
||||
// [{"IndexName":"email-index","KeySchema":[{"AttributeName":"email","KeyType":"HASH"}],
|
||||
// "Projection":{"ProjectionType":"ALL"}}]
|
||||
//
|
||||
// Allocates all strings with the given allocator (engine.allocator for long-lived data).
|
||||
parse_gsis_json :: proc(json_str: string, allocator: mem.Allocator) -> ([]Global_Secondary_Index, bool) {
|
||||
data, parse_err := json.parse(transmute([]byte)json_str, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return nil, false
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
arr, ok := data.(json.Array)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if len(arr) == 0 {
|
||||
return nil, true // Empty is valid
|
||||
}
|
||||
|
||||
result := make([]Global_Secondary_Index, len(arr), allocator)
|
||||
|
||||
for elem, i in arr {
|
||||
obj, obj_ok := elem.(json.Object)
|
||||
if !obj_ok {
|
||||
cleanup_gsis(result[:i], allocator)
|
||||
delete(result, allocator)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
gsi, gsi_ok := parse_single_gsi_json(obj, allocator)
|
||||
if !gsi_ok {
|
||||
cleanup_gsis(result[:i], allocator)
|
||||
delete(result, allocator)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
result[i] = gsi
|
||||
}
|
||||
|
||||
return result, true
|
||||
}
|
||||
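// Example (illustrative) — parsing a stored GSI definition; `metadata_allocator`
// stands for whichever long-lived allocator owns the table metadata:
//
//	gsis_json := `[{"IndexName":"email-index","KeySchema":[{"AttributeName":"email","KeyType":"HASH"}],"Projection":{"ProjectionType":"ALL"}}]`
//	gsis, ok := parse_gsis_json(gsis_json, metadata_allocator)
//	if ok {
//	    // gsis[0].index_name == "email-index", projection_type == .ALL
//	}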
|
||||
// Parse a single GSI object from JSON
|
||||
@(private = "file")
|
||||
parse_single_gsi_json :: proc(obj: json.Object, allocator: mem.Allocator) -> (Global_Secondary_Index, bool) {
|
||||
gsi: Global_Secondary_Index
|
||||
|
||||
// IndexName
|
||||
idx_val, idx_found := obj["IndexName"]
|
||||
if !idx_found {
|
||||
return {}, false
|
||||
}
|
||||
idx_str, idx_ok := idx_val.(json.String)
|
||||
if !idx_ok {
|
||||
return {}, false
|
||||
}
|
||||
gsi.index_name = strings.clone(string(idx_str), allocator)
|
||||
|
||||
// KeySchema
|
||||
ks_val, ks_found := obj["KeySchema"]
|
||||
if !ks_found {
|
||||
delete(gsi.index_name, allocator)
|
||||
return {}, false
|
||||
}
|
||||
ks_arr, ks_ok := ks_val.(json.Array)
|
||||
if !ks_ok || len(ks_arr) == 0 || len(ks_arr) > 2 {
|
||||
delete(gsi.index_name, allocator)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
key_schema := make([]Key_Schema_Element, len(ks_arr), allocator)
|
||||
for ks_elem, j in ks_arr {
|
||||
ks_obj, kobj_ok := ks_elem.(json.Object)
|
||||
if !kobj_ok {
|
||||
for k in 0..<j {
|
||||
delete(key_schema[k].attribute_name, allocator)
|
||||
}
|
||||
delete(key_schema, allocator)
|
||||
delete(gsi.index_name, allocator)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
an_val, an_found := ks_obj["AttributeName"]
|
||||
if !an_found {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name, allocator) }
|
||||
delete(key_schema, allocator)
|
||||
delete(gsi.index_name, allocator)
|
||||
return {}, false
|
||||
}
|
||||
an_str, an_ok := an_val.(json.String)
|
||||
if !an_ok {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name, allocator) }
|
||||
delete(key_schema, allocator)
|
||||
delete(gsi.index_name, allocator)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
kt_val, kt_found := ks_obj["KeyType"]
|
||||
if !kt_found {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name, allocator) }
|
||||
delete(key_schema, allocator)
|
||||
delete(gsi.index_name, allocator)
|
||||
return {}, false
|
||||
}
|
||||
kt_str, kt_ok := kt_val.(json.String)
|
||||
if !kt_ok {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name, allocator) }
|
||||
delete(key_schema, allocator)
|
||||
delete(gsi.index_name, allocator)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
kt, kt_parse_ok := key_type_from_string(string(kt_str))
|
||||
if !kt_parse_ok {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name, allocator) }
|
||||
delete(key_schema, allocator)
|
||||
delete(gsi.index_name, allocator)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
key_schema[j] = Key_Schema_Element{
|
||||
attribute_name = strings.clone(string(an_str), allocator),
|
||||
key_type = kt,
|
||||
}
|
||||
}
|
||||
gsi.key_schema = key_schema
|
||||
|
||||
// Projection
|
||||
gsi.projection.projection_type = .ALL // default
|
||||
if proj_val, proj_found := obj["Projection"]; proj_found {
|
||||
if proj_obj, proj_ok := proj_val.(json.Object); proj_ok {
|
||||
if pt_val, pt_found := proj_obj["ProjectionType"]; pt_found {
|
||||
if pt_str, pt_ok := pt_val.(json.String); pt_ok {
|
||||
switch string(pt_str) {
|
||||
case "ALL": gsi.projection.projection_type = .ALL
|
||||
case "KEYS_ONLY": gsi.projection.projection_type = .KEYS_ONLY
|
||||
case "INCLUDE": gsi.projection.projection_type = .INCLUDE
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NonKeyAttributes
|
||||
if nka_val, nka_found := proj_obj["NonKeyAttributes"]; nka_found {
|
||||
if nka_arr, nka_ok := nka_val.(json.Array); nka_ok && len(nka_arr) > 0 {
|
||||
nka := make([]string, len(nka_arr), allocator)
|
||||
for attr_val, k in nka_arr {
|
||||
if attr_str, attr_ok := attr_val.(json.String); attr_ok {
|
||||
nka[k] = strings.clone(string(attr_str), allocator)
|
||||
}
|
||||
}
|
||||
gsi.projection.non_key_attributes = nka
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return gsi, true
|
||||
}
|
||||
|
||||
// Clean up partially-constructed GSI array
|
||||
cleanup_gsis :: proc(gsis: []Global_Secondary_Index, allocator: mem.Allocator) {
|
||||
for gsi in gsis {
|
||||
delete(gsi.index_name, allocator)
|
||||
for ks in gsi.key_schema {
|
||||
delete(ks.attribute_name, allocator)
|
||||
}
|
||||
delete(gsi.key_schema, allocator)
|
||||
if nka, has_nka := gsi.projection.non_key_attributes.?; has_nka {
|
||||
for attr in nka {
|
||||
delete(attr, allocator)
|
||||
}
|
||||
delete(nka, allocator)
|
||||
}
|
||||
}
|
||||
}
|
||||
dynamodb/item_codec.odin (new file, 535 lines)
@@ -0,0 +1,535 @@
|
||||
// Binary TLV (Type-Length-Value) encoding for DynamoDB items
|
||||
// Replaces JSON storage with efficient binary format
|
||||
// Format: [attribute_count][name_len][name][type_tag][value_len][value]...
|
||||
package dynamodb
|
||||
|
||||
import "core:bytes"
|
||||
import "core:slice"
|
||||
|
||||
// Type tags for binary encoding (1 byte each)
|
||||
Type_Tag :: enum u8 {
|
||||
// Scalar types
|
||||
String = 0x01, // S
|
||||
Number = 0x02, // N (stored as string)
|
||||
Binary = 0x03, // B (base64 string)
|
||||
Boolean = 0x04, // BOOL
|
||||
Null = 0x05, // NULL
|
||||
|
||||
// Set types
|
||||
String_Set = 0x10, // SS
|
||||
Number_Set = 0x11, // NS
|
||||
Binary_Set = 0x12, // BS
|
||||
|
||||
// Complex types
|
||||
List = 0x20, // L
|
||||
Map = 0x21, // M
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Encoding (Item → Binary)
|
||||
// ============================================================================
|
||||
|
||||
// Encode an Item to binary TLV format
|
||||
// Format: [attribute_count:varint][attributes...]
|
||||
// Each attribute: [name_len:varint][name:bytes][type_tag:u8][value_encoded:bytes]
|
||||
encode :: proc(item: Item) -> ([]byte, bool) {
|
||||
buf: bytes.Buffer
|
||||
bytes.buffer_init_allocator(&buf, 0, 1024, context.allocator)
|
||||
defer bytes.buffer_destroy(&buf)
|
||||
|
||||
// Write attribute count
|
||||
encode_varint(&buf, len(item))
|
||||
|
||||
// Collect and sort keys for deterministic encoding
|
||||
keys := make([dynamic]string, context.temp_allocator)
|
||||
for key in item {
|
||||
append(&keys, key)
|
||||
}
|
||||
|
||||
slice.sort_by(keys[:], proc(a, b: string) -> bool {
|
||||
return a < b
|
||||
})
|
||||
|
||||
// Encode each attribute
|
||||
for key in keys {
|
||||
value := item[key]
|
||||
|
||||
// Write attribute name
|
||||
encode_varint(&buf, len(key))
|
||||
bytes.buffer_write_string(&buf, key)
|
||||
|
||||
// Encode attribute value
|
||||
ok := encode_attribute_value(&buf, value)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
// buffer_to_bytes returns a view into the buffer that the deferred buffer_destroy frees,
// so clone the bytes before handing ownership to the caller.
return slice.clone(bytes.buffer_to_bytes(&buf)), true
|
||||
}
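// Worked example (an illustrative sketch, not part of the original file): encoding the
// single-attribute item {"id": {"S": "a"}} with the format above produces, byte by byte:
//
//   0x01          attribute count = 1   (varint)
//   0x02          name length     = 2   (varint)
//   0x69 0x64     "id"
//   0x01          Type_Tag.String
//   0x01          value length    = 1   (varint)
//   0x61          "a"
//
// decode (below) walks the same layout in reverse, so encode followed by decode round-trips
// the item.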
|
||||
|
||||
// Encode an AttributeValue to binary format
|
||||
encode_attribute_value :: proc(buf: ^bytes.Buffer, attr: Attribute_Value) -> bool {
|
||||
switch v in attr {
|
||||
case String:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.String))
|
||||
encode_varint(buf, len(v))
|
||||
bytes.buffer_write_string(buf, string(v))
|
||||
|
||||
case DDB_Number:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Number))
|
||||
// Store as string in item encoding
|
||||
num_str := format_ddb_number(v)
|
||||
encode_varint(buf, len(num_str))
|
||||
bytes.buffer_write_string(buf, num_str)
|
||||
|
||||
case Binary:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Binary))
|
||||
encode_varint(buf, len(v))
|
||||
bytes.buffer_write_string(buf, string(v))
|
||||
|
||||
case Bool:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Boolean))
|
||||
bytes.buffer_write_byte(buf, 1 if bool(v) else 0)
|
||||
|
||||
case Null:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Null))
|
||||
// NULL has no value bytes
|
||||
|
||||
case DDB_Number_Set:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Number_Set)) // Use Number_Set tag, not DDB_Number_Set
|
||||
encode_varint(buf, len(v))
|
||||
for num in v {
|
||||
// Format the DDB_Number to a string
|
||||
num_str := format_ddb_number(num)
|
||||
encode_varint(buf, len(num_str))
|
||||
bytes.buffer_write_string(buf, num_str)
|
||||
}
|
||||
|
||||
case String_Set:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.String_Set))
|
||||
encode_varint(buf, len(v))
|
||||
for s in v {
|
||||
encode_varint(buf, len(s))
|
||||
bytes.buffer_write_string(buf, s)
|
||||
}
|
||||
|
||||
case Binary_Set:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Binary_Set))
|
||||
encode_varint(buf, len(v))
|
||||
for b in v {
|
||||
encode_varint(buf, len(b))
|
||||
bytes.buffer_write_string(buf, b)
|
||||
}
|
||||
|
||||
case List:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.List))
|
||||
encode_varint(buf, len(v))
|
||||
for item in v {
|
||||
ok := encode_attribute_value(buf, item)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
case Map:
|
||||
bytes.buffer_write_byte(buf, u8(Type_Tag.Map))
|
||||
encode_varint(buf, len(v))
|
||||
|
||||
// Collect and sort keys for deterministic encoding
|
||||
keys := make([dynamic]string, context.temp_allocator)
|
||||
for key in v {
|
||||
append(&keys, key)
|
||||
}
|
||||
|
||||
slice.sort_by(keys[:], proc(a, b: string) -> bool {
|
||||
return a < b
|
||||
})
|
||||
|
||||
// Encode each map entry
|
||||
for key in keys {
|
||||
value := v[key]
|
||||
encode_varint(buf, len(key))
|
||||
bytes.buffer_write_string(buf, key)
|
||||
ok := encode_attribute_value(buf, value)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Decoding (Binary → Item)
|
||||
// ============================================================================
|
||||
|
||||
// Binary decoder helper
|
||||
Binary_Decoder :: struct {
|
||||
data: []byte,
|
||||
pos: int,
|
||||
}
|
||||
|
||||
decoder_read_byte :: proc(decoder: ^Binary_Decoder) -> (u8, bool) {
|
||||
if decoder.pos >= len(decoder.data) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
byte := decoder.data[decoder.pos]
|
||||
decoder.pos += 1
|
||||
return byte, true
|
||||
}
|
||||
|
||||
decoder_read_bytes :: proc(decoder: ^Binary_Decoder, length: int) -> ([]byte, bool) {
|
||||
if decoder.pos + length > len(decoder.data) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
bytes := decoder.data[decoder.pos:decoder.pos + length]
|
||||
decoder.pos += length
|
||||
return bytes, true
|
||||
}
|
||||
|
||||
decoder_read_varint :: proc(decoder: ^Binary_Decoder) -> (int, bool) {
|
||||
result: int = 0
|
||||
shift: uint = 0
|
||||
|
||||
for decoder.pos < len(decoder.data) {
|
||||
byte := decoder.data[decoder.pos]
|
||||
decoder.pos += 1
|
||||
|
||||
result |= int(byte & 0x7F) << shift
|
||||
|
||||
if (byte & 0x80) == 0 {
|
||||
return result, true
|
||||
}
|
||||
|
||||
shift += 7
|
||||
if shift >= 64 {
|
||||
return 0, false // Varint overflow
|
||||
}
|
||||
}
|
||||
|
||||
return 0, false // Unexpected end of data
|
||||
}
|
||||
|
||||
// Decode binary TLV format back into an Item
|
||||
decode :: proc(data: []byte) -> (Item, bool) {
|
||||
decoder := Binary_Decoder{data = data, pos = 0}
|
||||
|
||||
attr_count, count_ok := decoder_read_varint(&decoder)
|
||||
if !count_ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
item := make(Item)
|
||||
|
||||
for _ in 0..<attr_count {
|
||||
// Read attribute name
|
||||
name_len, name_len_ok := decoder_read_varint(&decoder)
|
||||
if !name_len_ok {
|
||||
// Cleanup on error
|
||||
item_destroy(&item)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
name_bytes, name_ok := decoder_read_bytes(&decoder, name_len)
|
||||
if !name_ok {
|
||||
item_destroy(&item)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
owned_name := string(name_bytes)
|
||||
owned_name = transmute(string)slice.clone(transmute([]byte)owned_name)
|
||||
|
||||
// Read attribute value
|
||||
value, value_ok := decode_attribute_value(&decoder)
|
||||
if !value_ok {
|
||||
delete(owned_name)
|
||||
item_destroy(&item)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
item[owned_name] = value
|
||||
}
|
||||
|
||||
return item, true
|
||||
}
|
||||
|
||||
// Decode an AttributeValue from binary format
|
||||
decode_attribute_value :: proc(decoder: ^Binary_Decoder) -> (Attribute_Value, bool) {
|
||||
type_byte, type_ok := decoder_read_byte(decoder)
|
||||
if !type_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
type_tag := Type_Tag(type_byte)
|
||||
|
||||
switch type_tag {
|
||||
case .String:
|
||||
length, len_ok := decoder_read_varint(decoder)
|
||||
if !len_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
data, data_ok := decoder_read_bytes(decoder, length)
|
||||
if !data_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
str := string(data)
|
||||
owned := transmute(string)slice.clone(transmute([]byte)str)
|
||||
return String(owned), true
|
||||
|
||||
case .Number:
|
||||
length, len_ok := decoder_read_varint(decoder)
|
||||
if !len_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
data, data_ok := decoder_read_bytes(decoder, length)
|
||||
if !data_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
num_str := string(data)
|
||||
|
||||
// Parse into DDB_Number
|
||||
ddb_num, num_ok := parse_ddb_number(num_str)
|
||||
if !num_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return ddb_num, true
|
||||
|
||||
case .Binary:
|
||||
length, len_ok := decoder_read_varint(decoder)
|
||||
if !len_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
data, data_ok := decoder_read_bytes(decoder, length)
|
||||
if !data_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
str := string(data)
|
||||
owned := transmute(string)slice.clone(transmute([]byte)str)
|
||||
return Binary(owned), true
|
||||
|
||||
case .Boolean:
|
||||
byte, byte_ok := decoder_read_byte(decoder)
|
||||
if !byte_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return Bool(byte != 0), true
|
||||
|
||||
case .Null:
|
||||
return Null(true), true
|
||||
|
||||
case .String_Set:
|
||||
count, count_ok := decoder_read_varint(decoder)
|
||||
if !count_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
strings := make([]string, count)
|
||||
|
||||
for i in 0..<count {
|
||||
length, len_ok := decoder_read_varint(decoder)
|
||||
if !len_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
delete(strings[j])
|
||||
}
|
||||
delete(strings)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
data, data_ok := decoder_read_bytes(decoder, length)
|
||||
if !data_ok {
|
||||
for j in 0..<i {
|
||||
delete(strings[j])
|
||||
}
|
||||
delete(strings)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
str := string(data)
|
||||
strings[i] = transmute(string)slice.clone(transmute([]byte)str)
|
||||
}
|
||||
|
||||
return String_Set(strings), true
|
||||
|
||||
case .Number_Set:
|
||||
count, count_ok := decoder_read_varint(decoder)
|
||||
if !count_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
numbers := make([]DDB_Number, count) // Changed to DDB_Number
|
||||
|
||||
for i in 0..<count {
|
||||
length, len_ok := decoder_read_varint(decoder)
|
||||
if !len_ok {
|
||||
// No cleanup needed for DDB_Number (no heap allocations)
|
||||
delete(numbers)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
data, data_ok := decoder_read_bytes(decoder, length)
|
||||
if !data_ok {
|
||||
delete(numbers)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
num_str := string(data)
|
||||
|
||||
// Parse into DDB_Number
|
||||
ddb_num, num_ok := parse_ddb_number(num_str)
|
||||
if !num_ok {
|
||||
delete(numbers)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
numbers[i] = ddb_num
|
||||
}
|
||||
|
||||
return DDB_Number_Set(numbers), true
|
||||
|
||||
case .Binary_Set:
|
||||
count, count_ok := decoder_read_varint(decoder)
|
||||
if !count_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
binaries := make([]string, count)
|
||||
|
||||
for i in 0..<count {
|
||||
length, len_ok := decoder_read_varint(decoder)
|
||||
if !len_ok {
|
||||
for j in 0..<i {
|
||||
delete(binaries[j])
|
||||
}
|
||||
delete(binaries)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
data, data_ok := decoder_read_bytes(decoder, length)
|
||||
if !data_ok {
|
||||
for j in 0..<i {
|
||||
delete(binaries[j])
|
||||
}
|
||||
delete(binaries)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
str := string(data)
|
||||
binaries[i] = transmute(string)slice.clone(transmute([]byte)str)
|
||||
}
|
||||
|
||||
return Binary_Set(binaries), true
|
||||
|
||||
case .List:
|
||||
count, count_ok := decoder_read_varint(decoder)
|
||||
if !count_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
list := make([]Attribute_Value, count)
|
||||
|
||||
for i in 0..<count {
|
||||
value, value_ok := decode_attribute_value(decoder)
|
||||
if !value_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
item := list[j]
|
||||
attr_value_destroy(&item)
|
||||
}
|
||||
delete(list)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
list[i] = value
|
||||
}
|
||||
|
||||
return List(list), true
|
||||
|
||||
case .Map:
|
||||
count, count_ok := decoder_read_varint(decoder)
|
||||
if !count_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
attr_map := make(map[string]Attribute_Value)
|
||||
|
||||
for _ in 0..<count {
|
||||
// Read key
|
||||
key_len, key_len_ok := decoder_read_varint(decoder)
|
||||
if !key_len_ok {
|
||||
// Cleanup on error
|
||||
for k, v in attr_map {
|
||||
delete(k)
|
||||
v_copy := v
|
||||
attr_value_destroy(&v_copy)
|
||||
}
|
||||
delete(attr_map)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
key_bytes, key_ok := decoder_read_bytes(decoder, key_len)
|
||||
if !key_ok {
|
||||
for k, v in attr_map {
|
||||
delete(k)
|
||||
v_copy := v
|
||||
attr_value_destroy(&v_copy)
|
||||
}
|
||||
delete(attr_map)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
key := string(key_bytes)
|
||||
owned_key := transmute(string)slice.clone(transmute([]byte)key)
|
||||
|
||||
// Read value
|
||||
value, value_ok := decode_attribute_value(decoder)
|
||||
if !value_ok {
|
||||
delete(owned_key)
|
||||
for k, v in attr_map {
|
||||
delete(k)
|
||||
v_copy := v
|
||||
attr_value_destroy(&v_copy)
|
||||
}
|
||||
delete(attr_map)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
attr_map[owned_key] = value
|
||||
}
|
||||
|
||||
return Map(attr_map), true
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Varint Encoding (Encodes a varint length prefix)
|
||||
// ============================================================================
|
||||
|
||||
encode_varint :: proc(buf: ^bytes.Buffer, value: int) {
|
||||
v := value
|
||||
for {
|
||||
byte := u8(v & 0x7F)
|
||||
v >>= 7
|
||||
|
||||
if v == 0 {
|
||||
bytes.buffer_write_byte(buf, byte)
|
||||
return
|
||||
} else {
|
||||
bytes.buffer_write_byte(buf, byte | 0x80)
|
||||
}
|
||||
}
|
||||
}
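// Example values (illustrative, not in the original source) for the LEB128-style loop above:
//   0   -> 0x00
//   127 -> 0x7F
//   128 -> 0x80 0x01   (low 7 bits with the 0x80 continuation bit set, then 1)
//   300 -> 0xAC 0x02   (300 = 0b10_0101100 -> 0101100 | 0x80, then 0b10)
// decoder_read_varint (above) reverses this by accumulating 7 bits per byte until it sees a
// byte without the 0x80 continuation bit.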
|
||||
dynamodb/json.odin (new file, 820 lines)
@@ -0,0 +1,820 @@
|
||||
// DynamoDB JSON parsing and serialization
|
||||
// Pure functions for converting between DynamoDB JSON format and internal types
|
||||
package dynamodb
|
||||
|
||||
import "core:encoding/json"
|
||||
import "core:fmt"
|
||||
import "core:slice"
|
||||
import "core:strings"
|
||||
|
||||
// ============================================================================
|
||||
// Parsing (JSON → Types)
|
||||
// ============================================================================
|
||||
|
||||
// Parse DynamoDB JSON format into an Item
|
||||
// Caller owns returned Item
|
||||
parse_item :: proc(json_bytes: []byte) -> (Item, bool) {
|
||||
data, parse_err := json.parse(json_bytes, allocator = context.allocator)
|
||||
if parse_err != nil {
|
||||
return {}, false
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
return parse_item_from_value(data)
|
||||
}
|
||||
|
||||
// Parse an Item from an already-parsed JSON Value
|
||||
// More efficient when you already have a Value (e.g., from request body parsing)
|
||||
parse_item_from_value :: proc(value: json.Value) -> (Item, bool) {
|
||||
obj, ok := value.(json.Object)
|
||||
if !ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
item := make(Item)
|
||||
|
||||
for key, val in obj {
|
||||
attr_name := strings.clone(key)
|
||||
|
||||
attr_value, attr_ok := parse_attribute_value(val)
|
||||
if !attr_ok {
|
||||
// Cleanup on error
|
||||
for k, v in item {
|
||||
delete(k)
|
||||
v_copy := v
|
||||
attr_value_destroy(&v_copy)
|
||||
}
|
||||
delete(item)
|
||||
delete(attr_name)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
item[attr_name] = attr_value
|
||||
}
|
||||
|
||||
return item, true
|
||||
}
|
||||
|
||||
// Parse a single DynamoDB AttributeValue from JSON
|
||||
// Format: {"S": "value"}, {"N": "123"}, {"M": {...}}, etc.
|
||||
parse_attribute_value :: proc(value: json.Value) -> (Attribute_Value, bool) {
|
||||
obj, ok := value.(json.Object)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// DynamoDB attribute must have exactly one key (the type indicator)
|
||||
if len(obj) != 1 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Get the single key-value pair
|
||||
for type_name, type_value in obj {
|
||||
// String
|
||||
if type_name == "S" {
|
||||
str, str_ok := type_value.(json.String)
|
||||
if !str_ok {
|
||||
return nil, false
|
||||
}
|
||||
return String(strings.clone(string(str))), true
|
||||
}
|
||||
|
||||
// Number (stored as string)
|
||||
if type_name == "N" {
|
||||
str, str_ok := type_value.(json.String)
|
||||
if !str_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Parse into DDB_Number
|
||||
ddb_num, num_ok := parse_ddb_number(string(str))
|
||||
if !num_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Clone the string fields since they're slices of the input
|
||||
owned_num := clone_ddb_number(ddb_num)
|
||||
return owned_num, true
|
||||
}
|
||||
|
||||
// Binary (base64 string)
|
||||
if type_name == "B" {
|
||||
str, str_ok := type_value.(json.String)
|
||||
if !str_ok {
|
||||
return nil, false
|
||||
}
|
||||
return Binary(strings.clone(string(str))), true
|
||||
}
|
||||
|
||||
// Boolean
|
||||
if type_name == "BOOL" {
|
||||
b, b_ok := type_value.(json.Boolean)
|
||||
if !b_ok {
|
||||
return nil, false
|
||||
}
|
||||
return Bool(b), true
|
||||
}
|
||||
|
||||
// Null
|
||||
if type_name == "NULL" {
|
||||
b, b_ok := type_value.(json.Boolean)
|
||||
if !b_ok {
|
||||
return nil, false
|
||||
}
|
||||
return Null(b), true
|
||||
}
|
||||
|
||||
// String Set
|
||||
if type_name == "SS" {
|
||||
arr, arr_ok := type_value.(json.Array)
|
||||
if !arr_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
strings_arr := make([]string, len(arr))
|
||||
|
||||
for item, i in arr {
|
||||
str, str_ok := item.(json.String)
|
||||
if !str_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
delete(strings_arr[j])
|
||||
}
|
||||
delete(strings_arr)
|
||||
return nil, false
|
||||
}
|
||||
strings_arr[i] = strings.clone(string(str))
|
||||
}
|
||||
|
||||
return String_Set(strings_arr), true
|
||||
}
|
||||
|
||||
// Number Set
|
||||
if type_name == "NS" {
|
||||
arr, arr_ok := type_value.(json.Array)
|
||||
if !arr_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
numbers_arr := make([]DDB_Number, len(arr))
|
||||
|
||||
for item, i in arr {
|
||||
str, str_ok := item.(json.String)
|
||||
if !str_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
// Clean up DDB_Numbers
|
||||
delete(numbers_arr[j].integer_part)
|
||||
delete(numbers_arr[j].fractional_part)
|
||||
}
|
||||
delete(numbers_arr)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Parse into DDB_Number
|
||||
ddb_num, num_ok := parse_ddb_number(string(str))
|
||||
if !num_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
delete(numbers_arr[j].integer_part)
|
||||
delete(numbers_arr[j].fractional_part)
|
||||
}
|
||||
delete(numbers_arr)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Clone and store
|
||||
numbers_arr[i] = clone_ddb_number(ddb_num)
|
||||
}
|
||||
|
||||
return DDB_Number_Set(numbers_arr), true
|
||||
}
|
||||
|
||||
// Binary Set
|
||||
if type_name == "BS" {
|
||||
arr, arr_ok := type_value.(json.Array)
|
||||
if !arr_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
binaries_arr := make([]string, len(arr))
|
||||
|
||||
for item, i in arr {
|
||||
str, str_ok := item.(json.String)
|
||||
if !str_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
delete(binaries_arr[j])
|
||||
}
|
||||
delete(binaries_arr)
|
||||
return nil, false
|
||||
}
|
||||
binaries_arr[i] = strings.clone(string(str))
|
||||
}
|
||||
|
||||
return Binary_Set(binaries_arr), true
|
||||
}
|
||||
|
||||
// List
|
||||
if type_name == "L" {
|
||||
arr, arr_ok := type_value.(json.Array)
|
||||
if !arr_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
list := make([]Attribute_Value, len(arr))
|
||||
|
||||
for item, i in arr {
|
||||
val, val_ok := parse_attribute_value(item)
|
||||
if !val_ok {
|
||||
// Cleanup on error
|
||||
for j in 0..<i {
|
||||
item_copy := list[j]
|
||||
attr_value_destroy(&item_copy)
|
||||
}
|
||||
delete(list)
|
||||
return nil, false
|
||||
}
|
||||
list[i] = val
|
||||
}
|
||||
|
||||
return List(list), true
|
||||
}
|
||||
|
||||
// Map
|
||||
if type_name == "M" {
|
||||
map_obj, map_ok := type_value.(json.Object)
|
||||
if !map_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
attr_map := make(map[string]Attribute_Value)
|
||||
|
||||
for map_key, map_val in map_obj {
|
||||
key := strings.clone(map_key)
|
||||
|
||||
val, val_ok := parse_attribute_value(map_val)
|
||||
if !val_ok {
|
||||
// Cleanup on error
|
||||
delete(key)
|
||||
for k, v in attr_map {
|
||||
delete(k)
|
||||
v_copy := v
|
||||
attr_value_destroy(&v_copy)
|
||||
}
|
||||
delete(attr_map)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
attr_map[key] = val
|
||||
}
|
||||
|
||||
return Map(attr_map), true
|
||||
}
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Serialization (Types → JSON)
|
||||
// ============================================================================
|
||||
|
||||
// Serialize an Item to canonical DynamoDB JSON format
|
||||
// Keys are sorted alphabetically for deterministic output
|
||||
serialize_item :: proc(item: Item) -> string {
|
||||
builder := strings.builder_make()
|
||||
defer strings.builder_destroy(&builder)
|
||||
|
||||
serialize_item_to_builder(&builder, item)
|
||||
|
||||
return strings.clone(strings.to_string(builder))
|
||||
}
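// Example output (an illustrative sketch, not from the original file): an item holding the
// attributes "b" = {"N":"2"} and "a" = {"S":"x"} serializes as
//
//   {"a":{"S":"x"},"b":{"N":"2"}}
//
// Keys are emitted in sorted order by serialize_item_to_builder, so equal items always
// produce the same JSON string.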
|
||||
|
||||
// Serialize an Item to a strings.Builder with deterministic ordering
|
||||
serialize_item_to_builder :: proc(b: ^strings.Builder, item: Item) {
|
||||
// Collect and sort keys for deterministic output
|
||||
keys := make([dynamic]string, context.temp_allocator)
|
||||
defer delete(keys)
|
||||
|
||||
for key in item {
|
||||
append(&keys, key)
|
||||
}
|
||||
|
||||
// Sort keys alphabetically
|
||||
slice.sort_by(keys[:], proc(a, b: string) -> bool {
|
||||
return a < b
|
||||
})
|
||||
|
||||
strings.write_string(b, "{")
|
||||
for key, i in keys {
|
||||
if i > 0 {
|
||||
strings.write_string(b, ",")
|
||||
}
|
||||
fmt.sbprintf(b, `"%s":`, key)
|
||||
value := item[key]
|
||||
serialize_attribute_value(b, value)
|
||||
}
|
||||
strings.write_string(b, "}")
|
||||
}
|
||||
|
||||
// Serialize an AttributeValue to DynamoDB JSON format
|
||||
serialize_attribute_value :: proc(b: ^strings.Builder, attr: Attribute_Value) {
|
||||
switch v in attr {
|
||||
case String:
|
||||
strings.write_string(b, `{"S":"`)
|
||||
strings.write_string(b, string(v))
|
||||
strings.write_string(b, `"}`)
|
||||
|
||||
case DDB_Number:
|
||||
num_str := format_ddb_number(v)
|
||||
strings.write_string(b, `{"N":"`)
|
||||
strings.write_string(b, num_str)
|
||||
strings.write_string(b, `"}`)
|
||||
|
||||
case Binary:
|
||||
strings.write_string(b, `{"B":"`)
|
||||
strings.write_string(b, string(v))
|
||||
strings.write_string(b, `"}`)
|
||||
|
||||
case Bool:
|
||||
strings.write_string(b, `{"BOOL":`)
|
||||
if bool(v) { strings.write_string(b, "true") } else { strings.write_string(b, "false") }
|
||||
strings.write_string(b, "}")
|
||||
|
||||
case Null:
|
||||
strings.write_string(b, `{"NULL":true}`)
|
||||
|
||||
case String_Set:
|
||||
strings.write_string(b, `{"SS":[`)
|
||||
for s, i in v {
|
||||
if i > 0 {
|
||||
strings.write_string(b, ",")
|
||||
}
|
||||
fmt.sbprintf(b, `"%s"`, s)
|
||||
}
|
||||
strings.write_string(b, "]}")
|
||||
|
||||
case DDB_Number_Set:
|
||||
strings.write_string(b, `{"NS":[`)
|
||||
for num, i in v {
|
||||
if i > 0 {
|
||||
strings.write_string(b, ",")
|
||||
}
|
||||
num_str := format_ddb_number(num)
|
||||
fmt.sbprintf(b, `"%s"`, num_str)
|
||||
}
|
||||
strings.write_string(b, "]}")
|
||||
|
||||
case Binary_Set:
|
||||
strings.write_string(b, `{"BS":[`)
|
||||
for bin, i in v {
|
||||
if i > 0 {
|
||||
strings.write_string(b, ",")
|
||||
}
|
||||
fmt.sbprintf(b, `"%s"`, bin)
|
||||
}
|
||||
strings.write_string(b, "]}")
|
||||
|
||||
case List:
|
||||
strings.write_string(b, `{"L":[`)
|
||||
for item, i in v {
|
||||
if i > 0 {
|
||||
strings.write_string(b, ",")
|
||||
}
|
||||
serialize_attribute_value(b, item)
|
||||
}
|
||||
strings.write_string(b, "]}")
|
||||
|
||||
case Map:
|
||||
strings.write_string(b, `{"M":{`)
|
||||
|
||||
// Collect and sort keys for deterministic output
|
||||
keys := make([dynamic]string, context.temp_allocator)
|
||||
for key in v {
|
||||
append(&keys, key)
|
||||
}
|
||||
|
||||
slice.sort_by(keys[:], proc(a, b: string) -> bool {
|
||||
return a < b
|
||||
})
|
||||
|
||||
for key, i in keys {
|
||||
if i > 0 {
|
||||
strings.write_string(b, ",")
|
||||
}
|
||||
fmt.sbprintf(b, `"%s":`, key)
|
||||
value := v[key]
|
||||
serialize_attribute_value(b, value)
|
||||
}
|
||||
|
||||
strings.write_string(b, "}}")
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Request Parsing Helpers
|
||||
// ============================================================================
|
||||
|
||||
// Extract table name from request body
|
||||
parse_table_name :: proc(request_body: []byte) -> (string, bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return "", false
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, ok := data.(json.Object)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
|
||||
table_name_val, found := root["TableName"]
|
||||
if !found {
|
||||
return "", false
|
||||
}
|
||||
|
||||
table_name_str, str_ok := table_name_val.(json.String)
|
||||
if !str_ok {
|
||||
return "", false
|
||||
}
|
||||
|
||||
return strings.clone(string(table_name_str)), true
|
||||
}
|
||||
|
||||
// Parse Item field from request body
|
||||
// Returns owned Item
|
||||
parse_item_from_request :: proc(request_body: []byte) -> (Item, bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return {}, false
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, ok := data.(json.Object)
|
||||
if !ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
item_val, found := root["Item"]
|
||||
if !found {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
return parse_item_from_value(item_val)
|
||||
}
|
||||
|
||||
// Parse Key field from request body
|
||||
// Returns owned Item representing the key
|
||||
parse_key_from_request :: proc(request_body: []byte) -> (Item, bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return {}, false
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, ok := data.(json.Object)
|
||||
if !ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
key_val, found := root["Key"]
|
||||
if !found {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
return parse_item_from_value(key_val)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Pagination Helpers
|
||||
// ============================================================================
|
||||
|
||||
// Parse Limit from request body
|
||||
// Returns 0 if not present
|
||||
parse_limit :: proc(request_body: []byte) -> int {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return 0
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, ok := data.(json.Object)
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
limit_val, found := root["Limit"]
|
||||
if !found {
|
||||
return 0
|
||||
}
|
||||
|
||||
// JSON numbers can be either Integer or Float
|
||||
#partial switch v in limit_val {
|
||||
case json.Integer:
|
||||
return int(v)
|
||||
case json.Float:
|
||||
return int(v)
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// ============================================================================
// ExclusiveStartKey Parsing (Pagination Input)
//
// Parse ExclusiveStartKey from request body. Requires key_schema so we can
// validate and extract the key, then convert it to a binary storage key.
// Returns the binary key bytes that can be passed straight to scan/query.
// Returns nil (not an error) when the field is absent.
// ============================================================================

// Returns (key, ok, body_parse_err).
// ok=true, body_parse_err=false → key present and valid, or key absent (no pagination)
// ok=false, body_parse_err=true → request body is not valid JSON or not an object
// ok=false, body_parse_err=false → ExclusiveStartKey present but malformed/invalid
|
||||
parse_exclusive_start_key :: proc(
|
||||
request_body: []byte,
|
||||
table_name: string,
|
||||
key_schema: []Key_Schema_Element,
|
||||
) -> (result: Maybe([]byte), ok: bool, body_err: bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return nil, false, true // body is not valid JSON — real error
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
return nil, false, true // root must be an object — real error
|
||||
}
|
||||
|
||||
esk_val, found := root["ExclusiveStartKey"]
|
||||
if !found {
|
||||
return nil, true, false // absent → no pagination, that's ok
|
||||
}
|
||||
|
||||
// Parse ExclusiveStartKey as a DynamoDB Item
|
||||
key_item, item_ok := parse_item_from_value(esk_val)
|
||||
if !item_ok {
|
||||
return nil, false, false // present but malformed → validation error
|
||||
}
|
||||
defer item_destroy(&key_item)
|
||||
|
||||
// Validate and extract key struct using schema
|
||||
key_struct, key_ok := key_from_item(key_item, key_schema)
|
||||
if !key_ok {
|
||||
return nil, false, false // missing required key attributes
|
||||
}
|
||||
defer key_destroy(&key_struct)
|
||||
|
||||
// Get raw byte values
|
||||
key_values, kv_ok := key_get_values(&key_struct)
|
||||
if !kv_ok {
|
||||
return nil, false, false
|
||||
}
|
||||
|
||||
// Build binary storage key
|
||||
binary_key := build_data_key(table_name, key_values.pk, key_values.sk)
|
||||
result = binary_key
|
||||
ok = true
|
||||
return
|
||||
}
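// Usage sketch (illustrative only; the surrounding handler and variable names are
// assumptions, not part of this file):
//
//   start_key, ok, body_err := parse_exclusive_start_key(body, table_name, metadata.key_schema)
//   switch {
//   case body_err: // body is not valid JSON / not an object -> serialization-style error
//   case !ok:      // ExclusiveStartKey present but malformed -> validation-style error
//   case:
//       if k, has := start_key.?; has {
//           // resume the scan/query from the binary key k
//       } else {
//           // field absent -> start from the beginning
//       }
//   }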
|
||||
|
||||
// parse_exclusive_start_key_gsi: the GSI variant of parse_exclusive_start_key; it builds a binary GSI key instead of a base-table data key.
|
||||
// Returns (key, ok, body_parse_err) — same contract as parse_exclusive_start_key.
|
||||
parse_exclusive_start_key_gsi :: proc(
|
||||
request_body: []byte,
|
||||
table_name: string,
|
||||
metadata: ^Table_Metadata,
|
||||
gsi: ^Global_Secondary_Index,
|
||||
) -> (Maybe([]byte), bool, bool) {
|
||||
root, parse_err := json.parse(request_body)
|
||||
if parse_err != nil do return nil, false, true // body not valid JSON
|
||||
defer json.destroy_value(root)
|
||||
|
||||
obj, obj_ok := root.(json.Object)
|
||||
if !obj_ok do return nil, false, true // root must be an object
|
||||
|
||||
esk_val, has := obj["ExclusiveStartKey"]
|
||||
if !has do return nil, true, false // absent → no pagination
|
||||
|
||||
key_item, key_ok := parse_item_from_value(esk_val)
|
||||
if !key_ok do return nil, false, false
|
||||
defer item_destroy(&key_item)
|
||||
|
||||
idx_key, idx_ok := key_from_item(key_item, gsi.key_schema)
|
||||
if !idx_ok do return nil, false, false
|
||||
defer key_destroy(&idx_key)
|
||||
|
||||
idx_vals, idx_vals_ok := key_get_values(&idx_key)
|
||||
if !idx_vals_ok do return nil, false, false
|
||||
|
||||
base_key, base_ok := key_from_item(key_item, metadata.key_schema)
|
||||
if !base_ok do return nil, false, false
|
||||
defer key_destroy(&base_key)
|
||||
|
||||
base_vals, base_vals_ok := key_get_values(&base_key)
|
||||
if !base_vals_ok do return nil, false, false
|
||||
|
||||
k := build_gsi_key(
|
||||
table_name,
|
||||
gsi.index_name,
|
||||
idx_vals.pk,
|
||||
idx_vals.sk,
|
||||
base_vals.pk,
|
||||
base_vals.sk,
|
||||
)
|
||||
return k, true, false
|
||||
}
|
||||
|
||||
// ============================================================================
// LastEvaluatedKey Generation (Pagination Output)
//
// Decode a binary storage key back into a DynamoDB JSON fragment suitable
// for the "LastEvaluatedKey" field in scan/query responses.
//
// Steps:
//   1. Decode the binary key → table_name, pk_bytes, sk_bytes
//   2. Look up attribute types from metadata (S/N/B)
//   3. Build a Key struct with correctly-typed AttributeValues
//   4. Convert Key → Item → DynamoDB JSON string
// ============================================================================
|
||||
|
||||
// Build a Key struct from a binary storage key using metadata for type info.
|
||||
// This mirrors the Zig buildKeyFromBinaryWithTypes helper.
|
||||
build_key_from_binary_with_types :: proc(
|
||||
binary_key: []byte,
|
||||
metadata: ^Table_Metadata,
|
||||
) -> (key: Key, ok: bool) {
|
||||
decoder := Key_Decoder{data = binary_key, pos = 0}
|
||||
|
||||
// Skip entity type byte
|
||||
_ = decoder_read_entity_type(&decoder) or_return
|
||||
|
||||
// Skip table name segment
|
||||
_ = decoder_read_segment_borrowed(&decoder) or_return
|
||||
|
||||
// Read partition key bytes
|
||||
pk_bytes := decoder_read_segment_borrowed(&decoder) or_return
|
||||
|
||||
// Read sort key bytes if present
|
||||
sk_bytes: Maybe([]byte) = nil
|
||||
if decoder_has_more(&decoder) {
|
||||
sk := decoder_read_segment_borrowed(&decoder) or_return
|
||||
sk_bytes = sk
|
||||
}
|
||||
|
||||
// Get PK attribute type from metadata
|
||||
pk_name := table_metadata_get_partition_key_name(metadata).? or_return
|
||||
pk_type := table_metadata_get_attribute_type(metadata, pk_name).? or_return
|
||||
|
||||
pk_attr := build_attribute_value_with_type(pk_bytes, pk_type)
|
||||
|
||||
// Build SK attribute if present
|
||||
sk_attr: Maybe(Attribute_Value) = nil
|
||||
if sk, has_sk := sk_bytes.?; has_sk {
|
||||
sk_name := table_metadata_get_sort_key_name(metadata).? or_return
|
||||
sk_type := table_metadata_get_attribute_type(metadata, sk_name).? or_return
|
||||
sk_attr = build_attribute_value_with_type(sk, sk_type)
|
||||
}
|
||||
|
||||
return Key{pk = pk_attr, sk = sk_attr}, true
|
||||
}
|
||||
|
||||
// Serialize a binary storage key as a LastEvaluatedKey JSON fragment.
|
||||
// Returns a string like: {"pk":{"S":"val"},"sk":{"N":"42"}}
|
||||
serialize_last_evaluated_key :: proc(
|
||||
binary_key: []byte,
|
||||
metadata: ^Table_Metadata,
|
||||
) -> (result: string, ok: bool) {
|
||||
key, key_ok := build_key_from_binary_with_types(binary_key, metadata)
|
||||
if !key_ok {
|
||||
return "", false
|
||||
}
|
||||
defer key_destroy(&key)
|
||||
|
||||
item := key_to_item(key, metadata.key_schema)
|
||||
defer item_destroy(&item)
|
||||
|
||||
return serialize_item(item), true
|
||||
}
|
||||
|
||||
Decoded_GSI_Key_Full :: struct {
|
||||
gsi_pk: []byte,
|
||||
gsi_sk: Maybe([]byte),
|
||||
base_pk: []byte,
|
||||
base_sk: Maybe([]byte),
|
||||
}
|
||||
|
||||
// Decode binary GSI key:
|
||||
//
|
||||
// [gsi][table_name][index_name][gsi_pk][gsi_sk?][base_pk][base_sk?]
|
||||
//
|
||||
// Presence of gsi_sk/base_sk depends on whether the index/table has a RANGE key.
|
||||
decode_gsi_key_full_borrowed :: proc(
|
||||
binary_key: []byte,
|
||||
gsi_has_sort_key: bool,
|
||||
table_has_sort_key: bool,
|
||||
) -> (result: Decoded_GSI_Key_Full, ok: bool) {
|
||||
decoder := Key_Decoder{data = binary_key, pos = 0}
|
||||
|
||||
et := decoder_read_entity_type(&decoder) or_return
|
||||
if et != .GSI {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Skip table name + index name
|
||||
_ = decoder_read_segment_borrowed(&decoder) or_return
|
||||
_ = decoder_read_segment_borrowed(&decoder) or_return
|
||||
|
||||
// Read GSI PK
|
||||
result.gsi_pk = decoder_read_segment_borrowed(&decoder) or_return
|
||||
|
||||
// Read GSI SK if index has one
|
||||
if gsi_has_sort_key {
|
||||
sk := decoder_read_segment_borrowed(&decoder) or_return
|
||||
result.gsi_sk = sk
|
||||
}
|
||||
|
||||
// Read base PK
|
||||
result.base_pk = decoder_read_segment_borrowed(&decoder) or_return
|
||||
|
||||
// Read base SK if table has one
|
||||
if table_has_sort_key {
|
||||
sk := decoder_read_segment_borrowed(&decoder) or_return
|
||||
result.base_sk = sk
|
||||
}
|
||||
|
||||
return result, true
|
||||
}
|
||||
|
||||
|
||||
// Serialize a binary *GSI* key into a DynamoDB LastEvaluatedKey JSON object.
|
||||
// The output must include the *index* key attrs + the *base table* primary key attrs,
|
||||
// so boto can round-trip ExclusiveStartKey correctly.
|
||||
serialize_last_evaluated_key_gsi :: proc(
|
||||
binary_key: []byte,
|
||||
metadata: ^Table_Metadata,
|
||||
gsi: ^Global_Secondary_Index,
|
||||
) -> (result: string, ok: bool) {
|
||||
|
||||
// Determine whether index/table have range keys
|
||||
_, gsi_has_sk := gsi_get_sort_key_name(gsi).?
|
||||
_, tbl_has_sk := table_metadata_get_sort_key_name(metadata).?
|
||||
|
||||
decoded, dec_ok := decode_gsi_key_full_borrowed(binary_key, gsi_has_sk, tbl_has_sk)
|
||||
if !dec_ok {
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Resolve key attribute names + types
|
||||
idx_pk_name := gsi_get_partition_key_name(gsi).? or_return
|
||||
idx_pk_type := table_metadata_get_attribute_type(metadata, idx_pk_name).? or_return
|
||||
|
||||
idx_sk_name: Maybe(string) = gsi_get_sort_key_name(gsi)
|
||||
idx_sk_type: Maybe(Scalar_Attribute_Type) = nil
|
||||
if n, has := idx_sk_name.?; has {
|
||||
idx_sk_type = table_metadata_get_attribute_type(metadata, n)
|
||||
}
|
||||
|
||||
base_pk_name := table_metadata_get_partition_key_name(metadata).? or_return
|
||||
base_pk_type := table_metadata_get_attribute_type(metadata, base_pk_name).? or_return
|
||||
|
||||
base_sk_name: Maybe(string) = table_metadata_get_sort_key_name(metadata)
|
||||
base_sk_type: Maybe(Scalar_Attribute_Type) = nil
|
||||
if n, has := base_sk_name.?; has {
|
||||
base_sk_type = table_metadata_get_attribute_type(metadata, n)
|
||||
}
|
||||
|
||||
// Build LEK item
|
||||
lek := make(Item)
|
||||
defer item_destroy(&lek)
|
||||
|
||||
add_attr_once :: proc(item: ^Item, name: string, raw: []byte, t: Scalar_Attribute_Type) {
|
||||
if _, exists := item^[name]; exists {
|
||||
return
|
||||
}
|
||||
item^[strings.clone(name)] = build_attribute_value_with_type(raw, t)
|
||||
}
|
||||
|
||||
// Index keys
|
||||
add_attr_once(&lek, idx_pk_name, decoded.gsi_pk, idx_pk_type)
|
||||
|
||||
if sk_raw, has := decoded.gsi_sk.?; has {
|
||||
skn := idx_sk_name.? or_return
|
||||
skt := idx_sk_type.? or_return
|
||||
add_attr_once(&lek, skn, sk_raw, skt)
|
||||
}
|
||||
|
||||
// Base table keys
|
||||
add_attr_once(&lek, base_pk_name, decoded.base_pk, base_pk_type)
|
||||
|
||||
if sk_raw, has := decoded.base_sk.?; has {
|
||||
skn := base_sk_name.? or_return
|
||||
skt := base_sk_type.? or_return
|
||||
add_attr_once(&lek, skn, sk_raw, skt)
|
||||
}
|
||||
|
||||
return serialize_item(lek), true
|
||||
}
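// Example (illustrative; the attribute names are made up, not from this file): for a base
// table keyed on "pk" and a GSI keyed on "email", a page boundary might serialize as
//
//   {"email":{"S":"a@x.io"},"pk":{"S":"user#1"}}
//
// i.e. the index key attributes plus the base table key attributes, which is what a client
// echoes back as ExclusiveStartKey to continue the same GSI query or scan.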
|
||||
@@ -1,8 +1,6 @@
|
||||
package key_codec
|
||||
package dynamodb
|
||||
|
||||
import "core:bytes"
|
||||
import "core:encoding/varint"
|
||||
import "core:mem"
|
||||
|
||||
// Entity type prefix bytes for namespacing
|
||||
Entity_Type :: enum u8 {
|
||||
@@ -12,28 +10,52 @@ Entity_Type :: enum u8 {
|
||||
LSI = 0x04, // Local secondary index
|
||||
}
|
||||
|
||||
// Encode a varint length prefix
|
||||
encode_varint :: proc(buf: ^bytes.Buffer, value: int) {
|
||||
temp: [10]byte
|
||||
n := varint.encode_u64(temp[:], u64(value))
|
||||
bytes.buffer_write(buf, temp[:n])
|
||||
}
|
||||
|
||||
// Decode a varint length prefix
|
||||
// Decode a varint length prefix from a byte slice.
|
||||
// Reads starting at data[offset^] and advances offset^ past the varint on success.
|
||||
decode_varint :: proc(data: []byte, offset: ^int) -> (value: int, ok: bool) {
|
||||
if offset^ >= len(data) {
|
||||
i := offset^
|
||||
if i < 0 || i >= len(data) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
val, n := varint.decode_u64(data[offset^:])
|
||||
if n <= 0 {
|
||||
u: u64 = 0
|
||||
shift: u32 = 0
|
||||
|
||||
for {
|
||||
if i >= len(data) {
|
||||
return 0, false // truncated
|
||||
}
|
||||
|
||||
b := data[i]
|
||||
i += 1
|
||||
|
||||
u |= u64(b & 0x7F) << shift
|
||||
|
||||
if (b & 0x80) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
shift += 7
|
||||
if shift >= 64 {
|
||||
return 0, false // malformed / overflow
|
||||
}
|
||||
}
|
||||
|
||||
// ensure it fits in int on this platform
|
||||
max_int := int((~uint(0)) >> 1)
|
||||
if u > u64(max_int) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
offset^ += n
|
||||
return int(val), true
|
||||
offset^ = i
|
||||
return int(u), true
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// Build metadata key: [meta][table_name]
|
||||
build_meta_key :: proc(table_name: string) -> []byte {
|
||||
buf: bytes.Buffer
|
||||
@@ -108,32 +130,43 @@ build_partition_prefix :: proc(table_name: string, pk_value: []byte) -> []byte {
|
||||
return bytes.buffer_to_bytes(&buf)
|
||||
}
|
||||
|
||||
// Build GSI key: [gsi][table_name][index_name][gsi_pk][gsi_sk?]
|
||||
build_gsi_key :: proc(table_name: string, index_name: string, gsi_pk: []byte, gsi_sk: Maybe([]byte)) -> []byte {
|
||||
// Build GSI key: [gsi][table_name][index_name][gsi_pk][gsi_sk?][base_pk][base_sk?]
|
||||
build_gsi_key :: proc(
|
||||
table_name: string,
|
||||
index_name: string,
|
||||
gsi_pk: []byte,
|
||||
gsi_sk: Maybe([]byte),
|
||||
base_pk: []byte,
|
||||
base_sk: Maybe([]byte),
|
||||
) -> []byte {
|
||||
buf: bytes.Buffer
|
||||
bytes.buffer_init_allocator(&buf, 0, 512, context.allocator)
|
||||
|
||||
// Write entity type
|
||||
bytes.buffer_write_byte(&buf, u8(Entity_Type.GSI))
|
||||
|
||||
// Write table name
|
||||
encode_varint(&buf, len(table_name))
|
||||
bytes.buffer_write_string(&buf, table_name)
|
||||
|
||||
// Write index name
|
||||
encode_varint(&buf, len(index_name))
|
||||
bytes.buffer_write_string(&buf, index_name)
|
||||
|
||||
// Write GSI partition key
|
||||
encode_varint(&buf, len(gsi_pk))
|
||||
bytes.buffer_write(&buf, gsi_pk)
|
||||
|
||||
// Write GSI sort key if present
|
||||
if sk, ok := gsi_sk.?; ok {
|
||||
encode_varint(&buf, len(sk))
|
||||
bytes.buffer_write(&buf, sk)
|
||||
}
|
||||
|
||||
// tie-breaker: base table primary key
|
||||
encode_varint(&buf, len(base_pk))
|
||||
bytes.buffer_write(&buf, base_pk)
|
||||
|
||||
if sk, ok := base_sk.?; ok {
|
||||
encode_varint(&buf, len(sk))
|
||||
bytes.buffer_write(&buf, sk)
|
||||
}
|
||||
|
||||
return bytes.buffer_to_bytes(&buf)
|
||||
}
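// Layout sketch (added for illustration; the table, index, and key values are made up):
// for table "users", GSI "email-index", gsi_pk "a@x.io" with no GSI sort key, and base_pk
// "user#1" with no base sort key, the key bytes are
//
//   [Entity_Type.GSI]
//   [varint 5]["users"]
//   [varint 11]["email-index"]
//   [varint 6]["a@x.io"]
//   [varint 6]["user#1"]
//
// Appending the base table primary key keeps GSI entries distinct even when many items share
// the same index key value, which is why it is written as a tie-breaker.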
|
||||
|
||||
@@ -170,10 +203,6 @@ Key_Decoder :: struct {
|
||||
pos: int,
|
||||
}
|
||||
|
||||
decoder_init :: proc(data: []byte) -> Key_Decoder {
|
||||
return Key_Decoder{data = data, pos = 0}
|
||||
}
|
||||
|
||||
decoder_read_entity_type :: proc(decoder: ^Key_Decoder) -> (Entity_Type, bool) {
|
||||
if decoder.pos >= len(decoder.data) {
|
||||
return .Meta, false
|
||||
@@ -228,7 +257,7 @@ Decoded_Data_Key :: struct {
|
||||
}
|
||||
|
||||
decode_data_key :: proc(key: []byte) -> (result: Decoded_Data_Key, ok: bool) {
|
||||
decoder := decoder_init(key)
|
||||
decoder := Key_Decoder{data = key, pos = 0}
|
||||
|
||||
// Read and verify entity type
|
||||
entity_type := decoder_read_entity_type(&decoder) or_return
|
||||
dynamodb/key_codec_gsi.odin (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
// key_codec_gsi.odin — Additional key codec functions for GSI support
|
||||
//
|
||||
// These procedures complement key_codec.odin with prefix builders needed
|
||||
// for GSI scanning and querying. They follow the same encoding conventions:
|
||||
// [entity_type][varint_len][segment_bytes]...
|
||||
//
|
||||
// These may be merged into key_codec.odin or kept as a separate file in the dynamodb/ package.
|
||||
package dynamodb
|
||||
|
||||
import "core:bytes"
|
||||
|
||||
// Build GSI index prefix for scanning all entries in a GSI:
|
||||
// [gsi][table_name][index_name]
|
||||
build_gsi_prefix :: proc(table_name: string, index_name: string) -> []byte {
|
||||
buf: bytes.Buffer
|
||||
bytes.buffer_init_allocator(&buf, 0, 256, context.allocator)
|
||||
|
||||
bytes.buffer_write_byte(&buf, u8(Entity_Type.GSI))
|
||||
|
||||
encode_varint(&buf, len(table_name))
|
||||
bytes.buffer_write_string(&buf, table_name)
|
||||
|
||||
encode_varint(&buf, len(index_name))
|
||||
bytes.buffer_write_string(&buf, index_name)
|
||||
|
||||
return bytes.buffer_to_bytes(&buf)
|
||||
}
|
||||
|
||||
// Build GSI partition prefix for querying within a single partition:
|
||||
// [gsi][table_name][index_name][pk_value]
|
||||
build_gsi_partition_prefix :: proc(table_name: string, index_name: string, pk_value: []byte) -> []byte {
|
||||
buf: bytes.Buffer
|
||||
bytes.buffer_init_allocator(&buf, 0, 512, context.allocator)
|
||||
|
||||
bytes.buffer_write_byte(&buf, u8(Entity_Type.GSI))
|
||||
|
||||
encode_varint(&buf, len(table_name))
|
||||
bytes.buffer_write_string(&buf, table_name)
|
||||
|
||||
encode_varint(&buf, len(index_name))
|
||||
bytes.buffer_write_string(&buf, index_name)
|
||||
|
||||
encode_varint(&buf, len(pk_value))
|
||||
bytes.buffer_write(&buf, pk_value)
|
||||
|
||||
return bytes.buffer_to_bytes(&buf)
|
||||
}
|
||||
|
||||
// Decode a GSI key back into components
|
||||
Decoded_GSI_Key :: struct {
|
||||
table_name: string,
|
||||
index_name: string,
|
||||
pk_value: []byte,
|
||||
sk_value: Maybe([]byte),
|
||||
}
|
||||
|
||||
decode_gsi_key :: proc(key: []byte) -> (result: Decoded_GSI_Key, ok: bool) {
|
||||
decoder := Key_Decoder{data = key, pos = 0}
|
||||
|
||||
entity_type := decoder_read_entity_type(&decoder) or_return
|
||||
if entity_type != .GSI {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
table_name_bytes := decoder_read_segment(&decoder) or_return
|
||||
result.table_name = string(table_name_bytes)
|
||||
|
||||
index_name_bytes := decoder_read_segment(&decoder) or_return
|
||||
result.index_name = string(index_name_bytes)
|
||||
|
||||
result.pk_value = decoder_read_segment(&decoder) or_return
|
||||
|
||||
if decoder_has_more(&decoder) {
|
||||
sk := decoder_read_segment(&decoder) or_return
|
||||
result.sk_value = sk
|
||||
}
|
||||
|
||||
return result, true
|
||||
}
|
||||
|
||||
// Build GSI prefix for deleting all GSI entries for a table (used by delete_table)
|
||||
// [gsi][table_name]
|
||||
build_gsi_table_prefix :: proc(table_name: string) -> []byte {
|
||||
buf: bytes.Buffer
|
||||
bytes.buffer_init_allocator(&buf, 0, 256, context.allocator)
|
||||
|
||||
bytes.buffer_write_byte(&buf, u8(Entity_Type.GSI))
|
||||
|
||||
encode_varint(&buf, len(table_name))
|
||||
bytes.buffer_write_string(&buf, table_name)
|
||||
|
||||
return bytes.buffer_to_bytes(&buf)
|
||||
}
|
||||
dynamodb/number.odin (new file, 796 lines)
@@ -0,0 +1,796 @@
|
||||
package dynamodb
|
||||
|
||||
import "core:fmt"
|
||||
import "core:strconv"
|
||||
import "core:strings"
|
||||
import "core:bytes"
|
||||
|
||||
// ============================================================================
// DynamoDB Number Type
//
// DynamoDB numbers are arbitrary-precision decimals with up to 38 digits of
// precision. They can be positive, negative, or zero.
//
// We store numbers internally as:
//   - sign: bool (true = positive/zero, false = negative)
//   - integer_part: string (digits only, no sign)
//   - fractional_part: string (digits only, if any)
//   - exponent: i32 (for scientific notation, if needed)
//
// This preserves the original precision and allows proper ordering.
// ============================================================================
|
||||
|
||||
DDB_Number :: struct {
|
||||
sign: bool, // true = positive/zero, false = negative
|
||||
integer_part: string, // digits only (e.g., "123")
|
||||
fractional_part: string, // digits only (e.g., "456" for .456)
|
||||
exponent: i32, // scientific notation exponent (usually 0)
|
||||
}
|
||||
|
||||
// Parse a number string into DDB_Number
|
||||
// Supports formats: "123", "-123", "123.456", "1.23e10", "-1.23e-5"
|
||||
parse_ddb_number :: proc(s: string) -> (DDB_Number, bool) {
|
||||
if len(s) == 0 {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
num: DDB_Number
|
||||
str := s
|
||||
|
||||
// Parse sign
|
||||
if str[0] == '-' {
|
||||
num.sign = false
|
||||
str = str[1:]
|
||||
} else if str[0] == '+' {
|
||||
num.sign = true
|
||||
str = str[1:]
|
||||
} else {
|
||||
num.sign = true
|
||||
}
|
||||
|
||||
if len(str) == 0 {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Find exponent if present (e or E)
|
||||
exp_pos := -1
|
||||
for i in 0..<len(str) {
|
||||
if str[i] == 'e' || str[i] == 'E' {
|
||||
exp_pos = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Parse mantissa
|
||||
mantissa := str
|
||||
if exp_pos >= 0 {
|
||||
mantissa = str[:exp_pos]
|
||||
exp_str := str[exp_pos+1:]
|
||||
exp_val, exp_ok := strconv.parse_i64(exp_str)
|
||||
if !exp_ok {
|
||||
return {}, false
|
||||
}
|
||||
num.exponent = i32(exp_val)
|
||||
}
|
||||
|
||||
// Find decimal point
|
||||
dot_pos := -1
|
||||
for i in 0..<len(mantissa) {
|
||||
if mantissa[i] == '.' {
|
||||
dot_pos = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Parse integer and fractional parts
|
||||
if dot_pos >= 0 {
|
||||
num.integer_part = mantissa[:dot_pos]
|
||||
num.fractional_part = mantissa[dot_pos+1:]
|
||||
|
||||
// Validate fractional part
|
||||
for c in num.fractional_part {
|
||||
if c < '0' || c > '9' {
|
||||
return {}, false
|
||||
}
|
||||
}
|
||||
} else {
|
||||
num.integer_part = mantissa
|
||||
}
|
||||
|
||||
// Validate integer part (at least one digit, all digits)
|
||||
if len(num.integer_part) == 0 {
|
||||
num.integer_part = "0"
|
||||
}
|
||||
for c in num.integer_part {
|
||||
if c < '0' || c > '9' {
|
||||
return {}, false
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize: remove leading zeros from integer part (except if it's just "0")
|
||||
num = normalize_ddb_number(num)
|
||||
|
||||
// Check precision (DynamoDB supports up to 38 digits)
|
||||
total_digits := len(num.integer_part) + len(num.fractional_part)
|
||||
if total_digits > 38 {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Special case: if the number is zero
|
||||
if is_ddb_number_zero(num) {
|
||||
num.sign = true
|
||||
num.exponent = 0
|
||||
}
|
||||
|
||||
return num, true
|
||||
}
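// Example (an illustrative sketch, not in the original file): parse_ddb_number("-0012.3400")
// walks the steps above and yields
//   sign            = false   (leading '-')
//   integer_part    = "12"    (leading zeros removed by normalize_ddb_number)
//   fractional_part = "34"    (trailing zeros removed)
//   exponent        = 0
// and ddb_number_to_string renders that value back as "-12.34".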
|
||||
|
||||
// Normalize a DDB_Number (remove leading zeros, trailing fractional zeros)
|
||||
normalize_ddb_number :: proc(num: DDB_Number) -> DDB_Number {
|
||||
result := num
|
||||
|
||||
// Remove leading zeros from integer part
|
||||
int_part := num.integer_part
|
||||
for len(int_part) > 1 && int_part[0] == '0' {
|
||||
int_part = int_part[1:]
|
||||
}
|
||||
result.integer_part = int_part
|
||||
|
||||
// Remove trailing zeros from fractional part
|
||||
frac_part := num.fractional_part
|
||||
for len(frac_part) > 0 && frac_part[len(frac_part)-1] == '0' {
|
||||
frac_part = frac_part[:len(frac_part)-1]
|
||||
}
|
||||
result.fractional_part = frac_part
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Check if a DDB_Number represents zero
|
||||
is_ddb_number_zero :: proc(num: DDB_Number) -> bool {
|
||||
// Check if integer part is all zeros
|
||||
for c in num.integer_part {
|
||||
if c != '0' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
// Check if fractional part is all zeros
|
||||
for c in num.fractional_part {
|
||||
if c != '0' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Convert DDB_Number to string representation
|
||||
ddb_number_to_string :: proc(num: DDB_Number) -> string {
|
||||
builder := strings.builder_make()
|
||||
|
||||
if !num.sign {
|
||||
strings.write_string(&builder, "-")
|
||||
}
|
||||
|
||||
strings.write_string(&builder, num.integer_part)
|
||||
|
||||
if len(num.fractional_part) > 0 {
|
||||
strings.write_string(&builder, ".")
|
||||
strings.write_string(&builder, num.fractional_part)
|
||||
}
|
||||
|
||||
if num.exponent != 0 {
|
||||
fmt.sbprintf(&builder, "e%d", num.exponent)
|
||||
}
|
||||
|
||||
return strings.to_string(builder)
|
||||
}
|
||||
|
||||
// Compare two DDB_Numbers
|
||||
// Returns: -1 if a < b, 0 if a == b, 1 if a > b
|
||||
compare_ddb_numbers :: proc(a: DDB_Number, b: DDB_Number) -> int {
|
||||
// Handle zero cases
|
||||
a_zero := is_ddb_number_zero(a)
|
||||
b_zero := is_ddb_number_zero(b)
|
||||
|
||||
if a_zero && b_zero {
|
||||
return 0
|
||||
}
|
||||
if a_zero {
|
||||
return b.sign ? -1 : 1 // 0 < positive, 0 > negative
|
||||
}
|
||||
if b_zero {
|
||||
return a.sign ? 1 : -1 // positive > 0, negative < 0
|
||||
}
|
||||
|
||||
// Different signs
|
||||
if a.sign != b.sign {
|
||||
return a.sign ? 1 : -1 // positive > negative
|
||||
}
|
||||
|
||||
// Same sign - compare magnitudes
|
||||
mag_cmp := compare_ddb_number_magnitudes(a, b)
|
||||
|
||||
// If negative, reverse the comparison
|
||||
if !a.sign {
|
||||
return -mag_cmp
|
||||
}
|
||||
return mag_cmp
|
||||
}
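// Example (illustrative): because the sign is carried separately and the
// magnitude comparison is negated for negative numbers,
//
//   compare_ddb_numbers(parsed "-2", parsed "-10") -> 1    // -2 > -10
//   compare_ddb_numbers(parsed "0",  parsed "3")   -> -1   // 0 < positive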
|
||||
|
||||
// Compare magnitudes (absolute values) of two DDB_Numbers
|
||||
compare_ddb_number_magnitudes :: proc(a: DDB_Number, b: DDB_Number) -> int {
|
||||
// Adjust for exponents first
|
||||
a_adj := adjust_for_exponent(a)
|
||||
b_adj := adjust_for_exponent(b)
|
||||
|
||||
// Compare integer parts length
|
||||
if len(a_adj.integer_part) != len(b_adj.integer_part) {
|
||||
return len(a_adj.integer_part) > len(b_adj.integer_part) ? 1 : -1
|
||||
}
|
||||
|
||||
// Compare integer parts digit by digit
|
||||
for i in 0..<len(a_adj.integer_part) {
|
||||
if a_adj.integer_part[i] != b_adj.integer_part[i] {
|
||||
return a_adj.integer_part[i] > b_adj.integer_part[i] ? 1 : -1
|
||||
}
|
||||
}
|
||||
|
||||
// Integer parts equal, compare fractional parts
|
||||
max_frac_len := max(len(a_adj.fractional_part), len(b_adj.fractional_part))
|
||||
for i in 0..<max_frac_len {
|
||||
a_digit := i < len(a_adj.fractional_part) ? a_adj.fractional_part[i] : '0'
|
||||
b_digit := i < len(b_adj.fractional_part) ? b_adj.fractional_part[i] : '0'
|
||||
|
||||
if a_digit != b_digit {
|
||||
return a_digit > b_digit ? 1 : -1
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Adjust a number for its exponent (conceptually multiply by 10^exponent)
|
||||
adjust_for_exponent :: proc(num: DDB_Number) -> DDB_Number {
|
||||
if num.exponent == 0 {
|
||||
return num
|
||||
}
|
||||
|
||||
result := num
|
||||
result.exponent = 0
|
||||
|
||||
if num.exponent > 0 {
|
||||
// Shift decimal point right
|
||||
exp := int(num.exponent)
|
||||
frac := num.fractional_part
|
||||
|
||||
// Move fractional digits to integer part
|
||||
shift := min(exp, len(frac))
|
||||
result.integer_part = strings.concatenate({num.integer_part, frac[:shift]})
|
||||
result.fractional_part = frac[shift:]
|
||||
|
||||
// Add zeros if needed
|
||||
if exp > len(frac) {
|
||||
zeros := strings.repeat("0", exp - len(frac))
|
||||
result.integer_part = strings.concatenate({result.integer_part, zeros})
|
||||
}
|
||||
} else {
|
||||
// Shift decimal point left
|
||||
exp := -int(num.exponent)
|
||||
int_part := num.integer_part
|
||||
|
||||
// Move integer digits to fractional part
|
||||
shift := min(exp, len(int_part))
|
||||
result.integer_part = int_part[:len(int_part)-shift]
|
||||
if len(result.integer_part) == 0 {
|
||||
result.integer_part = "0"
|
||||
}
|
||||
result.fractional_part = strings.concatenate({
|
||||
int_part[len(int_part)-shift:],
|
||||
num.fractional_part,
|
||||
})
|
||||
|
||||
// Add leading zeros if needed
|
||||
if exp > len(int_part) {
|
||||
zeros := strings.repeat("0", exp - len(int_part))
|
||||
result.fractional_part = strings.concatenate({zeros, result.fractional_part})
|
||||
}
|
||||
}
|
||||
|
||||
return normalize_ddb_number(result)
|
||||
}
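// Worked example (illustrative): applying the exponent shifts the decimal
// point and the result is re-normalized.
//
//   adjust_for_exponent({integer_part = "1", fractional_part = "23", exponent = 10})
//     -> {integer_part = "12300000000", fractional_part = "",    exponent = 0}
//   adjust_for_exponent({integer_part = "5", fractional_part = "",   exponent = -3})
//     -> {integer_part = "0",           fractional_part = "005", exponent = 0}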
|
||||
|
||||
// ============================================================================
|
||||
// Canonical Encoding for Sort Keys
|
||||
//
|
||||
// For numbers to sort correctly in byte-wise comparisons, we need a
|
||||
// canonical encoding that preserves numeric ordering.
|
||||
//
|
||||
// Encoding format:
|
||||
// - 1 byte: sign/magnitude marker
|
||||
// - 0x00: negative infinity (reserved)
|
||||
// - 0x01-0x7F: negative numbers (inverted magnitude)
|
||||
// - 0x80: zero
|
||||
// - 0x81-0xFE: positive numbers (magnitude)
|
||||
// - 0xFF: positive infinity (reserved)
|
||||
// - N bytes: encoded magnitude (variable length)
|
||||
//
|
||||
// For positive numbers: we encode the magnitude directly, prefixed with a
// varint giving the number of integer digits (see encode_magnitude below).
|
||||
//
|
||||
// For negative numbers: we encode the magnitude inverted (bitwise NOT) so
|
||||
// that larger negative numbers sort before smaller ones.
|
||||
// ============================================================================
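// Example (illustrative): the marker byte alone already orders numbers across
// sign classes under byte-wise comparison, since 0x7F < 0x80 < 0x81:
//
//   for a number parsed from "-5": 0x7F followed by the inverted magnitude bytes
//   for a number parsed from "0":  0x80 (single byte)
//   for a number parsed from "5":  0x81 followed by the magnitude bytes
//
// Within the negative class the inversion makes -10 sort before -5, as required.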
|
||||
|
||||
// Encode a DDB_Number into canonical byte form for sort keys
|
||||
encode_ddb_number_for_sort :: proc(num: DDB_Number) -> []byte {
|
||||
buf: bytes.Buffer
|
||||
bytes.buffer_init_allocator(&buf, 0, 64, context.allocator)
|
||||
|
||||
if is_ddb_number_zero(num) {
|
||||
bytes.buffer_write_byte(&buf, 0x80)
|
||||
return bytes.buffer_to_bytes(&buf)
|
||||
}
|
||||
|
||||
// Get normalized magnitude
|
||||
norm := normalize_ddb_number(num)
|
||||
adj := adjust_for_exponent(norm)
|
||||
|
||||
// Encode magnitude bytes
|
||||
mag_bytes := encode_magnitude(adj)
|
||||
|
||||
if num.sign {
|
||||
// Positive number: 0x81 + magnitude
|
||||
bytes.buffer_write_byte(&buf, 0x81)
|
||||
bytes.buffer_write(&buf, mag_bytes)
|
||||
} else {
|
||||
// Negative number: 0x7F - inverted magnitude
|
||||
bytes.buffer_write_byte(&buf, 0x7F)
|
||||
// Invert all magnitude bytes
|
||||
for b in mag_bytes {
|
||||
bytes.buffer_write_byte(&buf, ~b)
|
||||
}
|
||||
}
|
||||
|
||||
return bytes.buffer_to_bytes(&buf)
|
||||
}
|
||||
|
||||
// Encode the magnitude of a number (integer + fractional parts)
|
||||
encode_magnitude :: proc(num: DDB_Number) -> []byte {
|
||||
buf: bytes.Buffer
|
||||
bytes.buffer_init_allocator(&buf, 0, 32, context.allocator)
|
||||
|
||||
// Write length of integer part as varint
|
||||
int_len := u64(len(num.integer_part))
|
||||
encode_varint(&buf, int_len)
|
||||
|
||||
// Write integer digits
|
||||
bytes.buffer_write_string(&buf, num.integer_part)
|
||||
|
||||
// Write fractional digits if any
|
||||
if len(num.fractional_part) > 0 {
|
||||
bytes.buffer_write_string(&buf, num.fractional_part)
|
||||
}
|
||||
|
||||
return bytes.buffer_to_bytes(&buf)
|
||||
}
|
||||
|
||||
// Decode a canonically encoded number back to DDB_Number
|
||||
decode_ddb_number_from_sort :: proc(data: []byte) -> (DDB_Number, bool) {
|
||||
if len(data) == 0 {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
marker := data[0]
|
||||
|
||||
// Zero
|
||||
if marker == 0x80 {
|
||||
return DDB_Number{
|
||||
sign = true,
|
||||
integer_part = "0",
|
||||
fractional_part = "",
|
||||
exponent = 0,
|
||||
}, true
|
||||
}
|
||||
|
||||
// Positive number
|
||||
if marker == 0x81 {
|
||||
return decode_magnitude(data[1:], true)
|
||||
}
|
||||
|
||||
// Negative number (inverted bytes)
|
||||
if marker == 0x7F {
|
||||
// Un-invert the bytes
|
||||
// Allocate from the temp allocator and do not free it here:
// decode_magnitude returns string views into this buffer, so it must stay
// alive until the caller (e.g. build_attribute_value_with_type) has cloned
// the result; the per-request arena reclaims it afterwards.
inverted := make([]byte, len(data)-1, context.temp_allocator)
|
||||
for i in 0..<len(inverted) {
|
||||
inverted[i] = ~data[i+1]
|
||||
}
|
||||
return decode_magnitude(inverted, false)
|
||||
}
|
||||
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Decode magnitude bytes back to a DDB_Number
|
||||
decode_magnitude :: proc(data: []byte, positive: bool) -> (DDB_Number, bool) {
|
||||
if len(data) == 0 {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Read integer length
|
||||
int_len, bytes_read := decode_varint(data)
|
||||
if bytes_read == 0 || int_len == 0 {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
offset := bytes_read
|
||||
|
||||
// Read integer part
|
||||
if offset + int(int_len) > len(data) {
|
||||
return {}, false
|
||||
}
|
||||
int_part := string(data[offset:offset + int(int_len)])
|
||||
offset += int(int_len)
|
||||
|
||||
// Read fractional part if any
|
||||
frac_part := ""
|
||||
if offset < len(data) {
|
||||
frac_part = string(data[offset:])
|
||||
}
|
||||
|
||||
return DDB_Number{
|
||||
sign = positive,
|
||||
integer_part = int_part,
|
||||
fractional_part = frac_part,
|
||||
exponent = 0,
|
||||
}, true
|
||||
}
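// Round-trip example (illustrative): encoding then decoding recovers the
// normalized magnitude. Note that decode_magnitude returns string views into
// the input bytes; callers such as build_attribute_value_with_type clone the
// result before the backing buffer goes away.
//
//   enc := encode_ddb_number_for_sort(parsed "12.5")   // bytes: 0x81, 0x02, '1', '2', '5'
//   num, ok := decode_ddb_number_from_sort(enc)        // -> {integer_part = "12", fractional_part = "5"}, true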
|
||||
|
||||
// ============================================================================
|
||||
// Decimal Arithmetic (38-digit precision, no float conversion)
|
||||
// ============================================================================
|
||||
|
||||
MAX_DDB_PRECISION :: 38
|
||||
|
||||
// Add two DDB_Numbers with full decimal precision.
|
||||
// Returns an owned DDB_Number.
|
||||
add_ddb_numbers :: proc(a: DDB_Number, b: DDB_Number) -> (DDB_Number, bool) {
|
||||
if is_ddb_number_zero(a) { return clone_ddb_number(b), true }
|
||||
if is_ddb_number_zero(b) { return clone_ddb_number(a), true }
|
||||
|
||||
if a.sign == b.sign {
|
||||
// Same sign: add magnitudes, keep sign
|
||||
result, ok := add_magnitudes(a, b)
|
||||
if !ok { return {}, false }
|
||||
result.sign = a.sign
|
||||
return result, true
|
||||
}
|
||||
|
||||
// Different signs: subtract smaller magnitude from larger
|
||||
cmp := compare_ddb_number_magnitudes(a, b)
|
||||
if cmp == 0 {
|
||||
return DDB_Number{
|
||||
sign = true,
|
||||
integer_part = strings.clone("0"),
|
||||
fractional_part = strings.clone(""),
|
||||
exponent = 0,
|
||||
}, true
|
||||
}
|
||||
|
||||
if cmp > 0 {
|
||||
result, ok := subtract_magnitudes(a, b)
|
||||
if !ok { return {}, false }
|
||||
result.sign = a.sign
|
||||
return result, true
|
||||
} else {
|
||||
result, ok := subtract_magnitudes(b, a)
|
||||
if !ok { return {}, false }
|
||||
result.sign = b.sign
|
||||
return result, true
|
||||
}
|
||||
}
|
||||
|
||||
// Subtract two DDB_Numbers: a - b
|
||||
subtract_ddb_numbers :: proc(a: DDB_Number, b: DDB_Number) -> (DDB_Number, bool) {
|
||||
neg_b := b
|
||||
neg_b.sign = !b.sign
|
||||
return add_ddb_numbers(a, neg_b)
|
||||
}
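// Usage sketch (illustrative): the arithmetic stays in decimal end to end, so
// no float rounding is introduced.
//
//   a, _ := parse_ddb_number("1.5")
//   b, _ := parse_ddb_number("2.75")
//   sum, ok := add_ddb_numbers(a, b)       // -> {integer_part = "4", fractional_part = "25"}, true
//   diff, _ := subtract_ddb_numbers(b, a)  // -> {integer_part = "1", fractional_part = "25"}, true
//
// Both results own their strings; free them explicitly or let the
// per-request arena reclaim them.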
|
||||
|
||||
// ============================================================================
|
||||
// Internal arithmetic helpers
|
||||
// ============================================================================
|
||||
|
||||
// Expand a DDB_Number to effective integer and fractional digit bytes
|
||||
// with the exponent fully applied. Returns heap-allocated slices (caller frees).
|
||||
@(private="file")
|
||||
expand_digits :: proc(num: DDB_Number) -> (int_digits: []u8, frac_digits: []u8) {
|
||||
dp := len(num.integer_part) + int(num.exponent)
|
||||
all_len := len(num.integer_part) + len(num.fractional_part)
|
||||
|
||||
if dp <= 0 {
|
||||
// Everything is fractional, need leading zeros
|
||||
frac := make([]u8, -dp + all_len)
|
||||
for i in 0..<(-dp) {
|
||||
frac[i] = '0'
|
||||
}
|
||||
for i in 0..<len(num.integer_part) {
|
||||
frac[-dp + i] = num.integer_part[i]
|
||||
}
|
||||
for i in 0..<len(num.fractional_part) {
|
||||
frac[-dp + len(num.integer_part) + i] = num.fractional_part[i]
|
||||
}
|
||||
|
||||
int_d := make([]u8, 1)
|
||||
int_d[0] = '0'
|
||||
return int_d, frac
|
||||
}
|
||||
|
||||
if dp >= all_len {
|
||||
// Everything is integer, may need trailing zeros
|
||||
int_d := make([]u8, dp)
|
||||
for i in 0..<len(num.integer_part) {
|
||||
int_d[i] = num.integer_part[i]
|
||||
}
|
||||
for i in 0..<len(num.fractional_part) {
|
||||
int_d[len(num.integer_part) + i] = num.fractional_part[i]
|
||||
}
|
||||
for i in all_len..<dp {
|
||||
int_d[i] = '0'
|
||||
}
|
||||
return int_d, nil
|
||||
}
|
||||
|
||||
// Decimal point falls within the original integer_part
|
||||
if dp <= len(num.integer_part) {
|
||||
int_d := make([]u8, dp)
|
||||
for i in 0..<dp {
|
||||
int_d[i] = num.integer_part[i]
|
||||
}
|
||||
|
||||
frac_len := (len(num.integer_part) - dp) + len(num.fractional_part)
|
||||
frac := make([]u8, frac_len)
|
||||
for i in dp..<len(num.integer_part) {
|
||||
frac[i - dp] = num.integer_part[i]
|
||||
}
|
||||
offset := len(num.integer_part) - dp
|
||||
for i in 0..<len(num.fractional_part) {
|
||||
frac[offset + i] = num.fractional_part[i]
|
||||
}
|
||||
return int_d, frac
|
||||
}
|
||||
|
||||
// Decimal point falls within the original fractional_part
|
||||
frac_split := dp - len(num.integer_part)
|
||||
|
||||
int_d := make([]u8, dp)
|
||||
for i in 0..<len(num.integer_part) {
|
||||
int_d[i] = num.integer_part[i]
|
||||
}
|
||||
for i in 0..<frac_split {
|
||||
int_d[len(num.integer_part) + i] = num.fractional_part[i]
|
||||
}
|
||||
|
||||
remaining := len(num.fractional_part) - frac_split
|
||||
frac: []u8 = nil
|
||||
if remaining > 0 {
|
||||
frac = make([]u8, remaining)
|
||||
for i in frac_split..<len(num.fractional_part) {
|
||||
frac[i - frac_split] = num.fractional_part[i]
|
||||
}
|
||||
}
|
||||
return int_d, frac
|
||||
}
|
||||
|
||||
// Normalize a DDB_Number that owns its strings.
|
||||
// Clones the trimmed result, frees the originals.
|
||||
@(private="file")
|
||||
normalize_owned :: proc(num: DDB_Number) -> DDB_Number {
|
||||
norm := normalize_ddb_number(num)
|
||||
|
||||
// Clone the normalized subslices BEFORE freeing originals
|
||||
new_int := strings.clone(norm.integer_part)
|
||||
new_frac := strings.clone(norm.fractional_part)
|
||||
|
||||
// Free the originals
|
||||
delete(num.integer_part)
|
||||
delete(num.fractional_part)
|
||||
|
||||
return DDB_Number{
|
||||
sign = norm.sign,
|
||||
integer_part = new_int,
|
||||
fractional_part = new_frac,
|
||||
exponent = norm.exponent,
|
||||
}
|
||||
}
|
||||
|
||||
// Add absolute values. Returns owned DDB_Number (sign=true).
|
||||
@(private="file")
|
||||
add_magnitudes :: proc(a: DDB_Number, b: DDB_Number) -> (DDB_Number, bool) {
|
||||
a_int, a_frac := expand_digits(a)
|
||||
b_int, b_frac := expand_digits(b)
|
||||
defer { delete(a_int); delete(a_frac); delete(b_int); delete(b_frac) }
|
||||
|
||||
max_int := max(len(a_int), len(b_int))
|
||||
max_frac := max(len(a_frac), len(b_frac))
|
||||
total := max_int + max_frac
|
||||
|
||||
// Build zero-padded aligned arrays
|
||||
a_aligned := make([]u8, total)
|
||||
b_aligned := make([]u8, total)
|
||||
defer { delete(a_aligned); delete(b_aligned) }
|
||||
|
||||
for i in 0..<total { a_aligned[i] = '0'; b_aligned[i] = '0' }
|
||||
|
||||
// Integer digits: right-aligned in [0..max_int)
|
||||
a_off := max_int - len(a_int)
|
||||
b_off := max_int - len(b_int)
|
||||
for i in 0..<len(a_int) { a_aligned[a_off + i] = a_int[i] }
|
||||
for i in 0..<len(b_int) { b_aligned[b_off + i] = b_int[i] }
|
||||
|
||||
// Fractional digits: left-aligned in [max_int..total)
|
||||
for i in 0..<len(a_frac) { a_aligned[max_int + i] = a_frac[i] }
|
||||
for i in 0..<len(b_frac) { b_aligned[max_int + i] = b_frac[i] }
|
||||
|
||||
// Add right-to-left
|
||||
result := make([]u8, total + 1) // +1 for carry
|
||||
carry: u8 = 0
|
||||
for i := total - 1; i >= 0; i -= 1 {
|
||||
sum := (a_aligned[i] - '0') + (b_aligned[i] - '0') + carry
|
||||
result[i + 1] = (sum % 10) + '0'
|
||||
carry = sum / 10
|
||||
}
|
||||
result[0] = carry + '0'
|
||||
|
||||
// Split: decimal point is at max_int + 1 (carry slot shifts everything)
|
||||
int_end := max_int + 1
|
||||
int_str := strings.clone(string(result[:int_end]))
|
||||
frac_str := strings.clone(string(result[int_end:]))
|
||||
delete(result)
|
||||
|
||||
num := normalize_owned(DDB_Number{
|
||||
sign = true,
|
||||
integer_part = int_str,
|
||||
fractional_part = frac_str,
|
||||
exponent = 0,
|
||||
})
|
||||
|
||||
if len(num.integer_part) + len(num.fractional_part) > MAX_DDB_PRECISION {
|
||||
delete(num.integer_part)
|
||||
delete(num.fractional_part)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
return num, true
|
||||
}
|
||||
|
||||
// Subtract absolute values: |a| - |b|, where |a| >= |b|.
|
||||
// Returns owned DDB_Number (sign=true).
|
||||
@(private="file")
|
||||
subtract_magnitudes :: proc(a: DDB_Number, b: DDB_Number) -> (DDB_Number, bool) {
|
||||
a_int, a_frac := expand_digits(a)
|
||||
b_int, b_frac := expand_digits(b)
|
||||
defer { delete(a_int); delete(a_frac); delete(b_int); delete(b_frac) }
|
||||
|
||||
max_int := max(len(a_int), len(b_int))
|
||||
max_frac := max(len(a_frac), len(b_frac))
|
||||
total := max_int + max_frac
|
||||
|
||||
a_aligned := make([]u8, total)
|
||||
b_aligned := make([]u8, total)
|
||||
defer { delete(a_aligned); delete(b_aligned) }
|
||||
|
||||
for i in 0..<total { a_aligned[i] = '0'; b_aligned[i] = '0' }
|
||||
|
||||
a_off := max_int - len(a_int)
|
||||
b_off := max_int - len(b_int)
|
||||
for i in 0..<len(a_int) { a_aligned[a_off + i] = a_int[i] }
|
||||
for i in 0..<len(b_int) { b_aligned[b_off + i] = b_int[i] }
|
||||
for i in 0..<len(a_frac) { a_aligned[max_int + i] = a_frac[i] }
|
||||
for i in 0..<len(b_frac) { b_aligned[max_int + i] = b_frac[i] }
|
||||
|
||||
// Subtract right-to-left
|
||||
result := make([]u8, total)
|
||||
borrow: u8 = 0
|
||||
for i := total - 1; i >= 0; i -= 1 {
|
||||
ad := a_aligned[i] - '0'
|
||||
bd := (b_aligned[i] - '0') + borrow
|
||||
if ad < bd {
|
||||
ad += 10
|
||||
borrow = 1
|
||||
} else {
|
||||
borrow = 0
|
||||
}
|
||||
result[i] = (ad - bd) + '0'
|
||||
}
|
||||
|
||||
int_str := strings.clone(string(result[:max_int]))
|
||||
frac_str := strings.clone(string(result[max_int:]))
|
||||
delete(result)
|
||||
|
||||
if len(int_str) == 0 {
|
||||
delete(int_str)
|
||||
int_str = strings.clone("0")
|
||||
}
|
||||
|
||||
num := normalize_owned(DDB_Number{
|
||||
sign = true,
|
||||
integer_part = int_str,
|
||||
fractional_part = frac_str,
|
||||
exponent = 0,
|
||||
})
|
||||
|
||||
if len(num.integer_part) + len(num.fractional_part) > MAX_DDB_PRECISION {
|
||||
delete(num.integer_part)
|
||||
delete(num.fractional_part)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
return num, true
|
||||
}
|
||||
|
||||
// Format a DDB_Number for display
|
||||
format_ddb_number :: proc(num: DDB_Number) -> string {
|
||||
// Normalize first
|
||||
norm := normalize_ddb_number(num)
|
||||
|
||||
// Check if it's effectively an integer
|
||||
if len(norm.fractional_part) == 0 && norm.exponent >= 0 {
|
||||
builder := strings.builder_make()
|
||||
if !norm.sign {
|
||||
strings.write_string(&builder, "-")
|
||||
}
|
||||
strings.write_string(&builder, norm.integer_part)
|
||||
// Add trailing zeros for positive exponent
|
||||
for _ in 0..<norm.exponent {
|
||||
strings.write_string(&builder, "0")
|
||||
}
|
||||
return strings.to_string(builder)
|
||||
}
|
||||
|
||||
// Otherwise use full representation
|
||||
return ddb_number_to_string(norm)
|
||||
}
|
||||
|
||||
// Deep-copy a DDB_Number (clones its integer and fractional digit strings)
|
||||
clone_ddb_number :: proc(num: DDB_Number) -> DDB_Number {
|
||||
return DDB_Number{
|
||||
sign = num.sign,
|
||||
integer_part = strings.clone(num.integer_part),
|
||||
fractional_part = strings.clone(num.fractional_part),
|
||||
exponent = num.exponent,
|
||||
}
|
||||
}
|
||||
|
||||
// Helper: encode a u64 as a LEB128-style varint (7 data bits per byte, high bit set on continuation bytes)
|
||||
@(private="file")
|
||||
encode_varint :: proc(buf: ^bytes.Buffer, value: u64) {
|
||||
v := value
|
||||
for {
|
||||
byte_val := u8(v & 0x7F)
|
||||
v >>= 7
|
||||
if v != 0 {
|
||||
byte_val |= 0x80
|
||||
}
|
||||
bytes.buffer_write_byte(buf, byte_val)
|
||||
if v == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
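// Worked example (illustrative): 300 = 0b1_0010_1100 encodes as two bytes,
// low 7 bits first, with the continuation bit set on the first byte:
//
//   encode_varint(&buf, 300)            // writes 0xAC, 0x02
//   decode_varint([]byte{0xAC, 0x02})   // -> 300, 2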
|
||||
|
||||
// Helper: decode_varint
|
||||
@(private="file")
|
||||
decode_varint :: proc(data: []byte) -> (value: u64, bytes_read: int) {
|
||||
shift: u64 = 0
|
||||
for i in 0..<len(data) {
|
||||
byte_val := data[i]
|
||||
value |= u64(byte_val & 0x7F) << shift
|
||||
bytes_read = i + 1
|
||||
if (byte_val & 0x80) == 0 {
|
||||
return
|
||||
}
|
||||
shift += 7
|
||||
}
|
||||
return 0, 0
|
||||
}
|
||||
1454 dynamodb/storage.odin (new file; diff suppressed because it is too large)
813 dynamodb/transact.odin (new file)
@@ -0,0 +1,813 @@
|
||||
// TransactWriteItems and TransactGetItems storage operations
|
||||
//
|
||||
// TransactWriteItems: Atomic write of up to 100 items across multiple tables.
|
||||
// - Supports Put, Delete, Update, and ConditionCheck actions
|
||||
// - ALL actions succeed or ALL fail (all-or-nothing)
|
||||
// - ConditionExpressions are evaluated BEFORE any mutations
|
||||
// - Uses exclusive locks on all involved tables
|
||||
//
|
||||
// TransactGetItems: Atomic read of up to 100 items across multiple tables.
|
||||
// - Each item specifies TableName + Key + optional ProjectionExpression
|
||||
// - All reads are consistent (snapshot isolation via table locks)
|
||||
package dynamodb
|
||||
|
||||
import "core:strings"
|
||||
import "core:sync"
|
||||
import "../rocksdb"
|
||||
|
||||
// ============================================================================
|
||||
// TransactWriteItems Types
|
||||
// ============================================================================
|
||||
|
||||
Transact_Write_Action_Type :: enum {
|
||||
Put,
|
||||
Delete,
|
||||
Update,
|
||||
Condition_Check,
|
||||
}
|
||||
|
||||
Transact_Write_Action :: struct {
|
||||
type: Transact_Write_Action_Type,
|
||||
table_name: string,
|
||||
// For Put: the full item to write
|
||||
item: Maybe(Item),
|
||||
// For Delete/Update/ConditionCheck: the key item
|
||||
key: Maybe(Item),
|
||||
// For Update: the parsed update plan
|
||||
update_plan: Maybe(Update_Plan),
|
||||
// ConditionExpression components (shared across all action types)
|
||||
condition_expr: Maybe(string),
|
||||
expr_attr_names: Maybe(map[string]string),
|
||||
expr_attr_values: map[string]Attribute_Value,
|
||||
// Note: ReturnValuesOnConditionCheckFailure is not implemented yet.
|
||||
}
|
||||
|
||||
Transact_Write_Result :: struct {
|
||||
// For now, either all succeed (no error) or we return a
|
||||
// TransactionCanceledException with reasons per action.
|
||||
cancellation_reasons: []Cancellation_Reason,
|
||||
}
|
||||
|
||||
Cancellation_Reason :: struct {
|
||||
code: string, // "None", "ConditionalCheckFailed", "ValidationError", etc.
|
||||
message: string,
|
||||
}
|
||||
|
||||
transact_write_action_destroy :: proc(action: ^Transact_Write_Action) {
|
||||
delete(action.table_name)
|
||||
if ce, has := action.condition_expr.?; has {
|
||||
delete(ce)
|
||||
}
|
||||
if item, has := action.item.?; has {
|
||||
item_copy := item
|
||||
item_destroy(&item_copy)
|
||||
}
|
||||
if key, has := action.key.?; has {
|
||||
key_copy := key
|
||||
item_destroy(&key_copy)
|
||||
}
|
||||
if plan, has := action.update_plan.?; has {
|
||||
plan_copy := plan
|
||||
update_plan_destroy(&plan_copy)
|
||||
}
|
||||
if names, has := action.expr_attr_names.?; has {
|
||||
for k, v in names {
|
||||
delete(k)
|
||||
delete(v)
|
||||
}
|
||||
names_copy := names
|
||||
delete(names_copy)
|
||||
}
|
||||
for k, v in action.expr_attr_values {
|
||||
delete(k)
|
||||
v_copy := v
|
||||
attr_value_destroy(&v_copy)
|
||||
}
|
||||
delete(action.expr_attr_values)
|
||||
}
|
||||
|
||||
transact_write_result_destroy :: proc(result: ^Transact_Write_Result) {
|
||||
if result.cancellation_reasons != nil {
|
||||
delete(result.cancellation_reasons)
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// TransactWriteItems — Execute an atomic batch of write operations
|
||||
//
|
||||
// DynamoDB semantics:
|
||||
// 1. Acquire exclusive locks on all involved tables
|
||||
// 2. Evaluate ALL ConditionExpressions (pre-flight check)
|
||||
// 3. If any condition fails → cancel entire transaction
|
||||
// 4. If all pass → apply all mutations
|
||||
// 5. Release locks
|
||||
//
|
||||
// Returns .None on success, Transaction_Cancelled on condition failure.
|
||||
// ============================================================================
|
||||
|
||||
Transaction_Error :: enum {
|
||||
None,
|
||||
Cancelled, // One or more conditions failed
|
||||
Validation_Error, // Bad request data
|
||||
Internal_Error, // Storage/serialization failure
|
||||
}
|
||||
|
||||
transact_write_items :: proc(
|
||||
engine: ^Storage_Engine,
|
||||
actions: []Transact_Write_Action,
|
||||
) -> (Transact_Write_Result, Transaction_Error) {
|
||||
result: Transact_Write_Result
|
||||
|
||||
if len(actions) == 0 {
|
||||
return result, .Validation_Error
|
||||
}
|
||||
|
||||
// ---- Step 1: Collect unique table names and acquire locks ----
|
||||
table_set := make(map[string]bool, allocator = context.temp_allocator)
|
||||
for action in actions {
|
||||
table_set[action.table_name] = true
|
||||
}
|
||||
|
||||
table_names := make([dynamic]string, allocator = context.temp_allocator)
|
||||
for name in table_set {
|
||||
append(&table_names, name)
|
||||
}
|
||||
// Sort for deterministic lock ordering
|
||||
for i := 0; i < len(table_names); i += 1 {
|
||||
for j := i + 1; j < len(table_names); j += 1 {
|
||||
if table_names[j] < table_names[i] {
|
||||
table_names[i], table_names[j] = table_names[j], table_names[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locks := make([dynamic]^sync.RW_Mutex, allocator = context.temp_allocator)
|
||||
for name in table_names {
|
||||
lock := get_or_create_table_lock(engine, name)
|
||||
sync.rw_mutex_lock(lock)
|
||||
append(&locks, lock)
|
||||
}
|
||||
defer {
|
||||
for i := len(locks) - 1; i >= 0; i -= 1 {
|
||||
sync.rw_mutex_unlock(locks[i])
|
||||
}
|
||||
}
|
||||
|
||||
// ---- Step 2: Fetch metadata and evaluate conditions ----
|
||||
reasons := make([]Cancellation_Reason, len(actions))
|
||||
any_failed := false
|
||||
|
||||
metadata_cache := make(map[string]Table_Metadata, allocator = context.temp_allocator)
|
||||
defer {
|
||||
for _, meta in metadata_cache {
|
||||
meta_copy := meta
|
||||
table_metadata_destroy(&meta_copy, engine.allocator)
|
||||
}
|
||||
}
|
||||
|
||||
for action, idx in actions {
|
||||
metadata: ^Table_Metadata
|
||||
if cached, found := &metadata_cache[action.table_name]; found {
|
||||
metadata = cached
|
||||
} else {
|
||||
meta, meta_err := get_table_metadata(engine, action.table_name)
|
||||
if meta_err != .None {
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "ValidationError",
|
||||
message = "Table not found",
|
||||
}
|
||||
any_failed = true
|
||||
continue
|
||||
}
|
||||
metadata_cache[action.table_name] = meta
|
||||
metadata = &metadata_cache[action.table_name]
|
||||
}
|
||||
|
||||
key_item: Item
|
||||
switch action.type {
|
||||
case .Put:
|
||||
if item, has := action.item.?; has {
|
||||
key_item = item
|
||||
} else {
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "ValidationError",
|
||||
message = "Put action missing Item",
|
||||
}
|
||||
any_failed = true
|
||||
continue
|
||||
}
|
||||
case .Delete, .Update, .Condition_Check:
|
||||
if key, has := action.key.?; has {
|
||||
key_item = key
|
||||
} else {
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "ValidationError",
|
||||
message = "Action missing Key",
|
||||
}
|
||||
any_failed = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Evaluate ConditionExpression
|
||||
if cond_str, has_cond := action.condition_expr.?; has_cond {
|
||||
existing_item, get_err := get_item_internal(engine, action.table_name, key_item, metadata)
|
||||
if get_err != .None && get_err != .Item_Not_Found {
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "InternalError",
|
||||
message = "Failed to read existing item",
|
||||
}
|
||||
any_failed = true
|
||||
continue
|
||||
}
|
||||
defer {
|
||||
if ex, has_ex := existing_item.?; has_ex {
|
||||
ex_copy := ex
|
||||
item_destroy(&ex_copy)
|
||||
}
|
||||
}
|
||||
|
||||
filter_node, parse_ok := parse_filter_expression(
|
||||
cond_str, action.expr_attr_names, action.expr_attr_values,
|
||||
)
|
||||
if !parse_ok || filter_node == nil {
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "ValidationError",
|
||||
message = "Invalid ConditionExpression",
|
||||
}
|
||||
any_failed = true
|
||||
continue
|
||||
}
|
||||
defer {
|
||||
filter_node_destroy(filter_node)
|
||||
}
|
||||
|
||||
eval_item: Item
|
||||
if item, has_item := existing_item.?; has_item {
|
||||
eval_item = item
|
||||
} else {
|
||||
eval_item = Item{}
|
||||
}
|
||||
|
||||
if !evaluate_filter(eval_item, filter_node) {
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "ConditionalCheckFailed",
|
||||
message = "The conditional request failed",
|
||||
}
|
||||
any_failed = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if action.type == .Condition_Check {
|
||||
reasons[idx] = Cancellation_Reason{code = "None"}
|
||||
continue
|
||||
}
|
||||
|
||||
// Validate key/item
|
||||
switch action.type {
|
||||
case .Put:
|
||||
if item, has := action.item.?; has {
|
||||
validation_err := validate_item_key_types(
|
||||
item, metadata.key_schema, metadata.attribute_definitions,
|
||||
)
|
||||
if validation_err != .None {
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "ValidationError",
|
||||
message = "Key attribute type mismatch",
|
||||
}
|
||||
any_failed = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
case .Delete, .Update:
|
||||
// Key validation happens during batch building
|
||||
case .Condition_Check:
|
||||
// Already handled
|
||||
}
|
||||
|
||||
reasons[idx] = Cancellation_Reason{code = "None"}
|
||||
}
|
||||
|
||||
if any_failed {
|
||||
result.cancellation_reasons = reasons
|
||||
return result, .Cancelled
|
||||
}
|
||||
|
||||
// ---- Step 3: Build atomic WriteBatch with all operations ----
|
||||
batch, batch_err := rocksdb.batch_create()
|
||||
if batch_err != .None {
|
||||
result.cancellation_reasons = reasons
|
||||
return result, .Internal_Error
|
||||
}
|
||||
defer rocksdb.batch_destroy(&batch)
|
||||
|
||||
// Read old items for GSI cleanup (must happen before batch write)
|
||||
old_items := make([]Maybe(Item), len(actions), allocator = context.temp_allocator)
|
||||
defer {
|
||||
for old_item in old_items {
|
||||
if old, has := old_item.?; has {
|
||||
old_copy := old
|
||||
item_destroy(&old_copy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for action, idx in actions {
|
||||
if action.type == .Condition_Check {
|
||||
continue
|
||||
}
|
||||
|
||||
metadata := &metadata_cache[action.table_name]
|
||||
|
||||
// Read old item if needed for GSI cleanup
|
||||
key_item: Item
|
||||
#partial switch action.type {
|
||||
case .Put:
|
||||
if item, has := action.item.?; has {
|
||||
key_item = item
|
||||
}
|
||||
case .Delete, .Update:
|
||||
if key, has := action.key.?; has {
|
||||
key_item = key
|
||||
}
|
||||
}
|
||||
|
||||
existing, read_err := get_item_internal(engine, action.table_name, key_item, metadata)
|
||||
#partial switch read_err {
|
||||
case .None:
|
||||
// Item found or not found — both fine.
|
||||
case .RocksDB_Error, .Serialization_Error, .Internal_Error:
|
||||
// Cannot safely determine old index keys — cancel the entire transaction.
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "InternalError",
|
||||
message = "Failed to read existing item for index maintenance",
|
||||
}
|
||||
result.cancellation_reasons = reasons
|
||||
return result, .Internal_Error
|
||||
case .Missing_Key_Attribute, .Invalid_Key:
|
||||
// The key we built from the action's own item/key should always be valid
|
||||
// by this point (validated earlier), but treat defensively.
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "ValidationError",
|
||||
message = "Invalid key when reading existing item",
|
||||
}
|
||||
result.cancellation_reasons = reasons
|
||||
return result, .Internal_Error
|
||||
case .Table_Not_Found, .Item_Not_Found, .Validation_Error:
|
||||
// These should not be returned by get_item_internal, but handle
|
||||
// defensively — treat as "item does not exist" and continue.
|
||||
}
|
||||
old_items[idx] = existing
|
||||
}
|
||||
|
||||
// Add all operations to batch
|
||||
for &action, idx in actions {
|
||||
if action.type == .Condition_Check {
|
||||
continue
|
||||
}
|
||||
|
||||
metadata := &metadata_cache[action.table_name]
|
||||
old_item := old_items[idx]
|
||||
|
||||
apply_err := transact_apply_action_batch(&batch, engine, &action, metadata, old_item)
|
||||
if apply_err != .None {
|
||||
reasons[idx] = Cancellation_Reason{
|
||||
code = "InternalError",
|
||||
message = "Failed to build mutation",
|
||||
}
|
||||
result.cancellation_reasons = reasons
|
||||
return result, .Internal_Error
|
||||
}
|
||||
}
|
||||
|
||||
// ---- Step 4: Write batch atomically (ALL or NOTHING) ----
|
||||
write_err := rocksdb.batch_write(&engine.db, &batch)
|
||||
if write_err != .None {
|
||||
result.cancellation_reasons = reasons
|
||||
return result, .Internal_Error
|
||||
}
|
||||
|
||||
delete(reasons)
|
||||
return result, .None
|
||||
}
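// Usage sketch (illustrative only, not part of this commit): how a request
// handler might drive a single atomic Put through transact_write_items.
// Assumes `engine` is an already-opened ^Storage_Engine and that a table
// named "users" exists with a string hash key attribute called "pk"; both
// names are placeholders.
example_transact_put :: proc(engine: ^Storage_Engine) -> bool {
	// Build the item; strings are cloned so transact_write_action_destroy can free them.
	item := make(Item)
	item[strings.clone("pk")]   = String(strings.clone("user#1"))
	item[strings.clone("name")] = String(strings.clone("Freyja"))

	put_action: Transact_Write_Action
	put_action.type       = .Put
	put_action.table_name = strings.clone("users")
	put_action.item       = item
	defer transact_write_action_destroy(&put_action)

	actions := []Transact_Write_Action{put_action}
	result, err := transact_write_items(engine, actions)
	defer transact_write_result_destroy(&result)

	// .Cancelled means a ConditionExpression failed; inspect result.cancellation_reasons.
	return err == .None
}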
|
||||
|
||||
// Apply a single transact write action (called after all conditions have passed)
|
||||
@(private = "file")
|
||||
transact_apply_action_batch :: proc(
|
||||
batch: ^rocksdb.WriteBatch,
|
||||
engine: ^Storage_Engine,
|
||||
action: ^Transact_Write_Action,
|
||||
metadata: ^Table_Metadata,
|
||||
old_item: Maybe(Item),
|
||||
) -> Storage_Error {
|
||||
switch action.type {
|
||||
case .Put:
|
||||
if item, has := action.item.?; has {
|
||||
return put_item_batch(batch, engine, action.table_name, item, metadata, old_item)
|
||||
}
|
||||
return .Invalid_Key
|
||||
|
||||
case .Delete:
|
||||
if key, has := action.key.?; has {
|
||||
return delete_item_batch(batch, engine, action.table_name, key, metadata, old_item)
|
||||
}
|
||||
return .Invalid_Key
|
||||
|
||||
case .Update:
|
||||
if key, has := action.key.?; has {
|
||||
if plan, has_plan := action.update_plan.?; has_plan {
|
||||
plan_copy := plan
|
||||
return update_item_batch(batch, engine, action.table_name, key, &plan_copy, metadata, old_item)
|
||||
}
|
||||
return .Invalid_Key
|
||||
}
|
||||
return .Invalid_Key
|
||||
|
||||
case .Condition_Check:
|
||||
return .None
|
||||
}
|
||||
return .None
|
||||
}
|
||||
|
||||
@(private = "file")
|
||||
put_item_batch :: proc(
|
||||
batch: ^rocksdb.WriteBatch,
|
||||
engine: ^Storage_Engine,
|
||||
table_name: string,
|
||||
item: Item,
|
||||
metadata: ^Table_Metadata,
|
||||
old_item: Maybe(Item),
|
||||
) -> Storage_Error {
|
||||
key_struct, key_ok := key_from_item(item, metadata.key_schema)
|
||||
if !key_ok {
|
||||
return .Missing_Key_Attribute
|
||||
}
|
||||
defer key_destroy(&key_struct)
|
||||
|
||||
key_values, kv_ok := key_get_values(&key_struct)
|
||||
if !kv_ok {
|
||||
return .Invalid_Key
|
||||
}
|
||||
|
||||
storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
|
||||
defer delete(storage_key)
|
||||
|
||||
encoded_item, encode_ok := encode(item)
|
||||
if !encode_ok {
|
||||
return .Serialization_Error
|
||||
}
|
||||
defer delete(encoded_item)
|
||||
|
||||
// Add base item to batch
|
||||
rocksdb.batch_put(batch, storage_key, encoded_item)
|
||||
|
||||
// Add old GSI deletions to batch
|
||||
if old, has_old := old_item.?; has_old {
|
||||
gsi_del_err := gsi_batch_delete_entries(batch, table_name, old, metadata)
|
||||
if gsi_del_err != .None {
|
||||
return gsi_del_err
|
||||
}
|
||||
}
|
||||
|
||||
// Add new GSI writes to batch
|
||||
gsi_write_err := gsi_batch_write_entries(batch, table_name, item, metadata)
|
||||
if gsi_write_err != .None {
|
||||
return gsi_write_err
|
||||
}
|
||||
|
||||
return .None
|
||||
}
|
||||
|
||||
// Add delete operation to batch (with GSI cleanup)
|
||||
@(private = "file")
|
||||
delete_item_batch :: proc(
|
||||
batch: ^rocksdb.WriteBatch,
|
||||
engine: ^Storage_Engine,
|
||||
table_name: string,
|
||||
key: Item,
|
||||
metadata: ^Table_Metadata,
|
||||
old_item: Maybe(Item),
|
||||
) -> Storage_Error {
|
||||
key_struct, key_ok := key_from_item(key, metadata.key_schema)
|
||||
if !key_ok {
|
||||
return .Missing_Key_Attribute
|
||||
}
|
||||
defer key_destroy(&key_struct)
|
||||
|
||||
key_values, kv_ok := key_get_values(&key_struct)
|
||||
if !kv_ok {
|
||||
return .Invalid_Key
|
||||
}
|
||||
|
||||
storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
|
||||
defer delete(storage_key)
|
||||
|
||||
// Add base item delete to batch
|
||||
rocksdb.batch_delete(batch, storage_key)
|
||||
|
||||
// Add GSI deletions to batch
|
||||
if old, has_old := old_item.?; has_old {
|
||||
gsi_del_err := gsi_batch_delete_entries(batch, table_name, old, metadata)
|
||||
if gsi_del_err != .None {
|
||||
return gsi_del_err
|
||||
}
|
||||
}
|
||||
|
||||
return .None
|
||||
}
|
||||
|
||||
// Add update operation to batch (with GSI maintenance)
|
||||
@(private = "file")
|
||||
update_item_batch :: proc(
|
||||
batch: ^rocksdb.WriteBatch,
|
||||
engine: ^Storage_Engine,
|
||||
table_name: string,
|
||||
key_item: Item,
|
||||
plan: ^Update_Plan,
|
||||
metadata: ^Table_Metadata,
|
||||
old_item_pre: Maybe(Item),
|
||||
) -> Storage_Error {
|
||||
key_struct, key_ok := key_from_item(key_item, metadata.key_schema)
|
||||
if !key_ok {
|
||||
return .Missing_Key_Attribute
|
||||
}
|
||||
defer key_destroy(&key_struct)
|
||||
|
||||
key_values, kv_ok := key_get_values(&key_struct)
|
||||
if !kv_ok {
|
||||
return .Invalid_Key
|
||||
}
|
||||
|
||||
storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
|
||||
defer delete(storage_key)
|
||||
|
||||
// Start with existing item or create new
|
||||
existing_item: Item
|
||||
if old, has_old := old_item_pre.?; has_old {
|
||||
existing_item = item_deep_copy(old)
|
||||
} else {
|
||||
existing_item = make(Item)
|
||||
for ks in metadata.key_schema {
|
||||
if val, found := key_item[ks.attribute_name]; found {
|
||||
existing_item[strings.clone(ks.attribute_name)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
defer item_destroy(&existing_item)
|
||||
|
||||
// Apply update plan.
|
||||
if exec_err := execute_update_plan(&existing_item, plan); exec_err != .None {
|
||||
return .Validation_Error
|
||||
}
|
||||
|
||||
// Encode updated item
|
||||
encoded_item, encode_ok := encode(existing_item)
|
||||
if !encode_ok {
|
||||
return .Serialization_Error
|
||||
}
|
||||
defer delete(encoded_item)
|
||||
|
||||
// Add base item to batch
|
||||
rocksdb.batch_put(batch, storage_key, encoded_item)
|
||||
|
||||
// Add old GSI deletions to batch
|
||||
if old, has_old := old_item_pre.?; has_old {
|
||||
gsi_del_err := gsi_batch_delete_entries(batch, table_name, old, metadata)
|
||||
if gsi_del_err != .None {
|
||||
return gsi_del_err
|
||||
}
|
||||
}
|
||||
|
||||
// Add new GSI writes to batch
|
||||
gsi_write_err := gsi_batch_write_entries(batch, table_name, existing_item, metadata)
|
||||
if gsi_write_err != .None {
|
||||
return gsi_write_err
|
||||
}
|
||||
|
||||
return .None
|
||||
}
|
||||
|
||||
|
||||
// ============================================================================
|
||||
// Internal storage operations that skip lock acquisition
|
||||
// (Used by transact_write_items which manages its own locking)
|
||||
// ============================================================================
|
||||
|
||||
get_item_internal :: proc(
|
||||
engine: ^Storage_Engine,
|
||||
table_name: string,
|
||||
key: Item,
|
||||
metadata: ^Table_Metadata,
|
||||
) -> (Maybe(Item), Storage_Error) {
|
||||
key_struct, key_ok := key_from_item(key, metadata.key_schema)
|
||||
if !key_ok {
|
||||
return nil, .Missing_Key_Attribute
|
||||
}
|
||||
defer key_destroy(&key_struct)
|
||||
|
||||
key_values, kv_ok := key_get_values(&key_struct)
|
||||
if !kv_ok {
|
||||
return nil, .Invalid_Key
|
||||
}
|
||||
|
||||
storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
|
||||
defer delete(storage_key)
|
||||
|
||||
value, get_err := rocksdb.db_get(&engine.db, storage_key)
|
||||
if get_err == .NotFound {
|
||||
return nil, .None
|
||||
}
|
||||
if get_err != .None {
|
||||
return nil, .RocksDB_Error
|
||||
}
|
||||
defer delete(value)
|
||||
|
||||
item, decode_ok := decode(value)
|
||||
if !decode_ok {
|
||||
return nil, .Serialization_Error
|
||||
}
|
||||
|
||||
return item, .None
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// TransactGetItems Types
|
||||
// ============================================================================
|
||||
|
||||
Transact_Get_Action :: struct {
|
||||
table_name: string,
|
||||
key: Item,
|
||||
projection: Maybe([]string), // Optional ProjectionExpression paths
|
||||
}
|
||||
|
||||
Transact_Get_Result :: struct {
|
||||
items: []Maybe(Item), // One per action, nil if item not found
|
||||
}
|
||||
|
||||
transact_get_action_destroy :: proc(action: ^Transact_Get_Action) {
|
||||
delete(action.table_name)
|
||||
item_destroy(&action.key)
|
||||
if proj, has := action.projection.?; has {
|
||||
for path in proj {
|
||||
delete(path)
|
||||
}
|
||||
delete(proj)
|
||||
}
|
||||
}
|
||||
|
||||
transact_get_result_destroy :: proc(result: ^Transact_Get_Result) {
|
||||
for &maybe_item in result.items {
|
||||
if item, has := maybe_item.?; has {
|
||||
item_copy := item
|
||||
item_destroy(&item_copy)
|
||||
}
|
||||
}
|
||||
delete(result.items)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// TransactGetItems — Atomically read up to 100 items
|
||||
//
|
||||
// DynamoDB semantics:
|
||||
// - All reads are performed with a consistent snapshot
|
||||
// - Missing items are returned as nil (no error)
|
||||
// - ProjectionExpression is applied per-item
|
||||
// ============================================================================
|
||||
|
||||
transact_get_items :: proc(
|
||||
engine: ^Storage_Engine,
|
||||
actions: []Transact_Get_Action,
|
||||
) -> (Transact_Get_Result, Transaction_Error) {
|
||||
result: Transact_Get_Result
|
||||
|
||||
if len(actions) == 0 {
|
||||
return result, .Validation_Error
|
||||
}
|
||||
|
||||
// Collect unique tables and acquire shared locks in deterministic order
|
||||
table_set := make(map[string]bool, allocator = context.temp_allocator)
|
||||
for action in actions {
|
||||
table_set[action.table_name] = true
|
||||
}
|
||||
|
||||
table_names := make([dynamic]string, allocator = context.temp_allocator)
|
||||
for name in table_set {
|
||||
append(&table_names, name)
|
||||
}
|
||||
for i := 0; i < len(table_names); i += 1 {
|
||||
for j := i + 1; j < len(table_names); j += 1 {
|
||||
if table_names[j] < table_names[i] {
|
||||
table_names[i], table_names[j] = table_names[j], table_names[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locks := make([dynamic]^sync.RW_Mutex, allocator = context.temp_allocator)
|
||||
for name in table_names {
|
||||
lock := get_or_create_table_lock(engine, name)
|
||||
sync.rw_mutex_shared_lock(lock)
|
||||
append(&locks, lock)
|
||||
}
|
||||
defer {
|
||||
for i := len(locks) - 1; i >= 0; i -= 1 {
|
||||
sync.rw_mutex_shared_unlock(locks[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Cache metadata
|
||||
metadata_cache := make(map[string]Table_Metadata, allocator = context.temp_allocator)
|
||||
defer {
|
||||
for _, meta in metadata_cache {
|
||||
meta_copy := meta
|
||||
table_metadata_destroy(&meta_copy, engine.allocator)
|
||||
}
|
||||
}
|
||||
|
||||
items := make([]Maybe(Item), len(actions))
|
||||
|
||||
for action, idx in actions {
|
||||
// Get metadata (cached)
|
||||
metadata: ^Table_Metadata
|
||||
if cached, found := &metadata_cache[action.table_name]; found {
|
||||
metadata = cached
|
||||
} else {
|
||||
meta, meta_err := get_table_metadata(engine, action.table_name)
|
||||
if meta_err != .None {
|
||||
items[idx] = nil
|
||||
continue
|
||||
}
|
||||
metadata_cache[action.table_name] = meta
|
||||
metadata = &metadata_cache[action.table_name]
|
||||
}
|
||||
|
||||
// Fetch item
|
||||
item_result, get_err := get_item_internal(engine, action.table_name, action.key, metadata)
|
||||
if get_err != .None {
|
||||
items[idx] = nil
|
||||
continue
|
||||
}
|
||||
|
||||
// Apply projection if specified
|
||||
if item, has_item := item_result.?; has_item {
|
||||
if proj, has_proj := action.projection.?; has_proj && len(proj) > 0 {
|
||||
projected := apply_projection(item, proj)
|
||||
item_copy := item
|
||||
item_destroy(&item_copy)
|
||||
items[idx] = projected
|
||||
} else {
|
||||
items[idx] = item
|
||||
}
|
||||
} else {
|
||||
items[idx] = nil
|
||||
}
|
||||
}
|
||||
|
||||
result.items = items
|
||||
return result, .None
|
||||
}
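// Usage sketch (illustrative): a TransactGetItems handler builds one
// Transact_Get_Action per requested item (table_name + key, optional
// projection), calls transact_get_items, and serializes result.items in the
// same order; a nil entry means that item was not found. Free the result with
// transact_get_result_destroy and each action with transact_get_action_destroy.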
|
||||
|
||||
// ============================================================================
|
||||
// Helper: Extract modified attribute paths from an Update_Plan
|
||||
//
|
||||
// Used for UPDATED_NEW / UPDATED_OLD ReturnValues filtering.
|
||||
// DynamoDB only returns the attributes that were actually modified
|
||||
// by the UpdateExpression, not the entire item.
|
||||
// ============================================================================
|
||||
|
||||
get_update_plan_modified_paths :: proc(plan: ^Update_Plan) -> []string {
|
||||
paths := make(map[string]bool, allocator = context.temp_allocator)
|
||||
|
||||
for action in plan.sets {
|
||||
paths[action.path] = true
|
||||
}
|
||||
for action in plan.removes {
|
||||
paths[action.path] = true
|
||||
}
|
||||
for action in plan.adds {
|
||||
paths[action.path] = true
|
||||
}
|
||||
for action in plan.deletes {
|
||||
paths[action.path] = true
|
||||
}
|
||||
|
||||
result := make([]string, len(paths))
|
||||
i := 0
|
||||
for path in paths {
|
||||
result[i] = path
|
||||
i += 1
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Filter an item to only include the specified attribute paths.
|
||||
// Returns a new deep-copied item containing only matching attributes.
|
||||
filter_item_to_paths :: proc(item: Item, paths: []string) -> Item {
|
||||
result := make(Item)
|
||||
for path in paths {
|
||||
if val, found := item[path]; found {
|
||||
result[strings.clone(path)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
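// Example (illustrative): for ReturnValues = UPDATED_NEW a handler can do
//
//   paths := get_update_plan_modified_paths(&plan)
//   updated := filter_item_to_paths(new_item, paths)   // deep copy of only the touched attributes
//
// where `plan` is the parsed Update_Plan and `new_item` is the item after the
// update was applied (both names here are placeholders).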
|
||||
@@ -6,25 +6,24 @@ import "core:strings"
|
||||
// DynamoDB AttributeValue - the core data type
|
||||
Attribute_Value :: union {
|
||||
String, // S
|
||||
Number, // N (stored as string)
|
||||
DDB_Number, // N — decimal-preserving numeric type
|
||||
Binary, // B (base64)
|
||||
Bool, // BOOL
|
||||
Null, // NULL
|
||||
String_Set, // SS
|
||||
Number_Set, // NS
|
||||
DDB_Number_Set, // NS
|
||||
Binary_Set, // BS
|
||||
List, // L
|
||||
Map, // M
|
||||
}
|
||||
|
||||
String :: distinct string
|
||||
Number :: distinct string
|
||||
Binary :: distinct string
|
||||
Bool :: distinct bool
|
||||
Null :: distinct bool
|
||||
|
||||
String_Set :: distinct []string
|
||||
Number_Set :: distinct []string
|
||||
DDB_Number_Set :: distinct []DDB_Number
|
||||
Binary_Set :: distinct []string
|
||||
List :: distinct []Attribute_Value
|
||||
Map :: distinct map[string]Attribute_Value
|
||||
@@ -60,7 +59,7 @@ key_from_item :: proc(item: Item, key_schema: []Key_Schema_Element) -> (Key, boo
|
||||
|
||||
// Validate that key is a scalar type (S, N, or B)
|
||||
#partial switch _ in attr {
|
||||
case String, Number, Binary:
|
||||
case String, DDB_Number, Binary:
|
||||
// Valid key type
|
||||
case:
|
||||
return {}, false
|
||||
@@ -113,11 +112,11 @@ Key_Values :: struct {
|
||||
key_get_values :: proc(key: ^Key) -> (Key_Values, bool) {
|
||||
pk_bytes: []byte
|
||||
|
||||
switch v in key.pk {
|
||||
#partial switch v in key.pk {
|
||||
case String:
|
||||
pk_bytes = transmute([]byte)string(v)
|
||||
case Number:
|
||||
pk_bytes = transmute([]byte)string(v)
|
||||
case DDB_Number:
|
||||
pk_bytes = encode_ddb_number_for_sort(v)
|
||||
case Binary:
|
||||
pk_bytes = transmute([]byte)string(v)
|
||||
case:
|
||||
@@ -126,11 +125,11 @@ key_get_values :: proc(key: ^Key) -> (Key_Values, bool) {
|
||||
|
||||
sk_bytes: Maybe([]byte)
|
||||
if sk, ok := key.sk.?; ok {
|
||||
switch v in sk {
|
||||
#partial switch v in sk {
|
||||
case String:
|
||||
sk_bytes = transmute([]byte)string(v)
|
||||
case Number:
|
||||
sk_bytes = transmute([]byte)string(v)
|
||||
case DDB_Number:
|
||||
sk_bytes = encode_ddb_number_for_sort(v)
|
||||
case Binary:
|
||||
sk_bytes = transmute([]byte)string(v)
|
||||
case:
|
||||
@@ -251,6 +250,18 @@ table_status_to_string :: proc(status: Table_Status) -> string {
|
||||
return "ACTIVE"
|
||||
}
|
||||
|
||||
table_status_from_string :: proc(s: string) -> Table_Status {
|
||||
switch s {
|
||||
case "CREATING": return .CREATING
|
||||
case "UPDATING": return .UPDATING
|
||||
case "DELETING": return .DELETING
|
||||
case "ACTIVE": return .ACTIVE
|
||||
case "ARCHIVING": return .ARCHIVING
|
||||
case "ARCHIVED": return .ARCHIVED
|
||||
}
|
||||
return .ACTIVE
|
||||
}
|
||||
|
||||
// Table description
|
||||
Table_Description :: struct {
|
||||
table_name: string,
|
||||
@@ -350,13 +361,38 @@ error_to_response :: proc(err_type: DynamoDB_Error_Type, message: string) -> str
|
||||
return fmt.aprintf(`{{"__type":"%s","message":"%s"}}`, type_str, message)
|
||||
}
|
||||
|
||||
// Build an Attribute_Value with the correct scalar type from raw bytes
|
||||
build_attribute_value_with_type :: proc(raw_bytes: []byte, attr_type: Scalar_Attribute_Type) -> Attribute_Value {
|
||||
switch attr_type {
|
||||
case .S:
|
||||
return String(strings.clone(string(raw_bytes)))
|
||||
case .N:
|
||||
// Key bytes are canonical-encoded via encode_ddb_number_for_sort.
|
||||
// Decode them back to a DDB_Number.
|
||||
ddb_num, ok := decode_ddb_number_from_sort(raw_bytes)
|
||||
if ok {
|
||||
return clone_ddb_number(ddb_num)
|
||||
}
|
||||
// Fallback: try interpreting as a plain numeric string
|
||||
fallback_num, fb_ok := parse_ddb_number(string(raw_bytes))
|
||||
if fb_ok {
|
||||
return fallback_num
|
||||
}
|
||||
// Last resort — return as string (shouldn't happen)
|
||||
return String(strings.clone(string(raw_bytes)))
|
||||
case .B:
|
||||
return Binary(strings.clone(string(raw_bytes)))
|
||||
}
|
||||
return String(strings.clone(string(raw_bytes)))
|
||||
}
|
||||
|
||||
// Deep copy an attribute value
|
||||
attr_value_deep_copy :: proc(attr: Attribute_Value) -> Attribute_Value {
|
||||
switch v in attr {
|
||||
case String:
|
||||
return String(strings.clone(string(v)))
|
||||
case Number:
|
||||
return Number(strings.clone(string(v)))
|
||||
case DDB_Number:
|
||||
return clone_ddb_number(v)
|
||||
case Binary:
|
||||
return Binary(strings.clone(string(v)))
|
||||
case Bool:
|
||||
@@ -369,12 +405,12 @@ attr_value_deep_copy :: proc(attr: Attribute_Value) -> Attribute_Value {
|
||||
ss[i] = strings.clone(s)
|
||||
}
|
||||
return String_Set(ss)
|
||||
case Number_Set:
|
||||
ns := make([]string, len(v))
|
||||
for n, i in v {
|
||||
ns[i] = strings.clone(n)
|
||||
case DDB_Number_Set:
|
||||
ddb_ns := make([]DDB_Number, len(v))
|
||||
for num, i in v {
|
||||
ddb_ns[i] = clone_ddb_number(num)
|
||||
}
|
||||
return Number_Set(ns)
|
||||
return DDB_Number_Set(ddb_ns)
|
||||
case Binary_Set:
|
||||
bs := make([]string, len(v))
|
||||
for b, i in v {
|
||||
@@ -402,38 +438,44 @@ attr_value_destroy :: proc(attr: ^Attribute_Value) {
|
||||
switch v in attr {
|
||||
case String:
|
||||
delete(string(v))
|
||||
case Number:
|
||||
delete(string(v))
|
||||
case DDB_Number:
|
||||
delete(v.integer_part)
|
||||
delete(v.fractional_part)
|
||||
case Binary:
|
||||
delete(string(v))
|
||||
case String_Set:
|
||||
for s in v {
|
||||
delete(s)
|
||||
}
|
||||
delete([]string(v))
|
||||
case Number_Set:
|
||||
for n in v {
|
||||
delete(n)
|
||||
slice := v
|
||||
delete(slice)
|
||||
case DDB_Number_Set:
|
||||
for num in v {
|
||||
delete(num.integer_part)
|
||||
delete(num.fractional_part)
|
||||
}
|
||||
delete([]string(v))
|
||||
delete(v)
|
||||
case Binary_Set:
|
||||
for b in v {
|
||||
delete(b)
|
||||
}
|
||||
delete([]string(v))
|
||||
slice := v
|
||||
delete(slice)
|
||||
case List:
|
||||
for item in v {
|
||||
item_copy := item
|
||||
attr_value_destroy(&item_copy)
|
||||
}
|
||||
delete([]Attribute_Value(v))
|
||||
list := v
|
||||
delete(list)
|
||||
case Map:
|
||||
for key, val in v {
|
||||
delete(key)
|
||||
val_copy := val
|
||||
attr_value_destroy(&val_copy)
|
||||
}
|
||||
delete(map[string]Attribute_Value(v))
|
||||
m := v
|
||||
delete(m)
|
||||
case Bool, Null:
|
||||
// Nothing to free
|
||||
}
|
||||
|
||||
978 dynamodb/update.odin (new file)
@@ -0,0 +1,978 @@
|
||||
// UpdateExpression Parser and Executor
|
||||
// Supports: SET path = value [, path = value ...]
|
||||
// REMOVE path [, path ...]
|
||||
// ADD path value [, path value ...] (numeric add / set add)
|
||||
// DELETE path value [, path value ...] (set remove)
|
||||
//
|
||||
// Values can be:
|
||||
// :placeholder → resolved from ExpressionAttributeValues
|
||||
// path + :placeholder → numeric addition
|
||||
// path - :placeholder → numeric subtraction
|
||||
// if_not_exists(path, :placeholder) → default value
|
||||
// list_append(operand, operand) → list concatenation
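//
// Example (illustrative): the expression
//
//   SET #n = :name, count = count + :inc REMOVE temp
//
// with ExpressionAttributeNames {"#n": "name"} parses into an Update_Plan with
// two Set_Actions (a Direct assignment to "name" and a Plus on "count") and
// one Remove_Action for "temp".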
|
||||
package dynamodb
|
||||
|
||||
import "core:encoding/json"
|
||||
import "core:strings"
|
||||
|
||||
// ============================================================================
|
||||
// Update Plan — parsed representation of an UpdateExpression
|
||||
// ============================================================================
|
||||
|
||||
Update_Action_Type :: enum {
|
||||
SET,
|
||||
REMOVE,
|
||||
ADD,
|
||||
DELETE,
|
||||
}
|
||||
|
||||
Set_Value_Kind :: enum {
|
||||
Direct, // SET x = :val
|
||||
Plus, // SET x = x + :val or SET x = :val + x
|
||||
Minus, // SET x = x - :val
|
||||
If_Not_Exists, // SET x = if_not_exists(x, :val)
|
||||
List_Append, // SET x = list_append(x, :val)
|
||||
}
|
||||
|
||||
Set_Action :: struct {
|
||||
path: string,
|
||||
value_kind: Set_Value_Kind,
|
||||
value: Attribute_Value, // primary value
|
||||
source: string, // source path for Plus/Minus/If_Not_Exists/List_Append
|
||||
value2: Maybe(Attribute_Value), // second operand for list_append where both are values
|
||||
}
|
||||
|
||||
Remove_Action :: struct {
|
||||
path: string,
|
||||
}
|
||||
|
||||
Add_Action :: struct {
|
||||
path: string,
|
||||
value: Attribute_Value,
|
||||
}
|
||||
|
||||
Delete_Action :: struct {
|
||||
path: string,
|
||||
value: Attribute_Value,
|
||||
}
|
||||
|
||||
Update_Plan :: struct {
|
||||
sets: [dynamic]Set_Action,
|
||||
removes: [dynamic]Remove_Action,
|
||||
adds: [dynamic]Add_Action,
|
||||
deletes: [dynamic]Delete_Action,
|
||||
}
|
||||
|
||||
update_plan_destroy :: proc(plan: ^Update_Plan) {
|
||||
for &s in plan.sets {
|
||||
attr_value_destroy(&s.value)
|
||||
if v2, ok := s.value2.?; ok {
|
||||
v2_copy := v2
|
||||
attr_value_destroy(&v2_copy)
|
||||
}
|
||||
}
|
||||
delete(plan.sets)
|
||||
|
||||
delete(plan.removes)
|
||||
|
||||
for &a in plan.adds {
|
||||
attr_value_destroy(&a.value)
|
||||
}
|
||||
delete(plan.adds)
|
||||
|
||||
for &d in plan.deletes {
|
||||
attr_value_destroy(&d.value)
|
||||
}
|
||||
delete(plan.deletes)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Parse UpdateExpression
|
||||
//
|
||||
// Grammar (simplified):
|
||||
// update_expr = clause { clause }
|
||||
// clause = "SET" set_list | "REMOVE" remove_list | "ADD" add_list | "DELETE" delete_list
|
||||
// set_list = set_entry { "," set_entry }
|
||||
// set_entry = path "=" value_expr
|
||||
// value_expr = :placeholder
|
||||
// | path "+" :placeholder
|
||||
// | path "-" :placeholder
|
||||
// | "if_not_exists" "(" path "," :placeholder ")"
|
||||
// | "list_append" "(" operand "," operand ")"
|
||||
// remove_list = path { "," path }
|
||||
// add_list = add_entry { "," add_entry }
|
||||
// add_entry = path :placeholder
|
||||
// delete_list = delete_entry { "," delete_entry }
|
||||
// delete_entry= path :placeholder
|
||||
// ============================================================================
|
||||
|
||||
parse_update_expression :: proc(
|
||||
expression: string,
|
||||
attribute_names: Maybe(map[string]string),
|
||||
attribute_values: map[string]Attribute_Value,
|
||||
) -> (plan: Update_Plan, ok: bool) {
|
||||
plan.sets = make([dynamic]Set_Action)
|
||||
plan.removes = make([dynamic]Remove_Action)
|
||||
plan.adds = make([dynamic]Add_Action)
|
||||
plan.deletes = make([dynamic]Delete_Action)
|
||||
|
||||
t := tokenizer_init(expression)
|
||||
|
||||
for {
|
||||
keyword_maybe := tokenizer_next(&t)
|
||||
keyword_str, has_keyword := keyword_maybe.?
|
||||
if !has_keyword {
|
||||
break // done
|
||||
}
|
||||
|
||||
if strings.equal_fold(keyword_str, "SET") {
|
||||
if !parse_set_clause(&t, &plan, attribute_names, attribute_values) {
|
||||
update_plan_destroy(&plan)
|
||||
return {}, false
|
||||
}
|
||||
} else if strings.equal_fold(keyword_str, "REMOVE") {
|
||||
if !parse_remove_clause(&t, &plan, attribute_names) {
|
||||
update_plan_destroy(&plan)
|
||||
return {}, false
|
||||
}
|
||||
} else if strings.equal_fold(keyword_str, "ADD") {
|
||||
if !parse_add_clause(&t, &plan, attribute_names, attribute_values) {
|
||||
update_plan_destroy(&plan)
|
||||
return {}, false
|
||||
}
|
||||
} else if strings.equal_fold(keyword_str, "DELETE") {
|
||||
if !parse_delete_clause(&t, &plan, attribute_names, attribute_values) {
|
||||
update_plan_destroy(&plan)
|
||||
return {}, false
|
||||
}
|
||||
} else {
|
||||
update_plan_destroy(&plan)
|
||||
return {}, false
|
||||
}
|
||||
}
|
||||
|
||||
return plan, true
|
||||
}
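// Minimal call-site sketch (hypothetical wiring; `expr`, `names`, and `values`
// are assumed to be decoded from the UpdateItem request body elsewhere):
//
//     plan, plan_ok := parse_update_expression(expr, names, values)
//     if !plan_ok {
//         // the caller maps this to a ValidationException response
//         return
//     }
//     defer update_plan_destroy(&plan)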
|
||||
|
||||
// ============================================================================
|
||||
// SET clause parsing
|
||||
// ============================================================================
|
||||
|
||||
parse_set_clause :: proc(
|
||||
t: ^Tokenizer,
|
||||
plan: ^Update_Plan,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> bool {
|
||||
saved_pos: int
|
||||
for {
|
||||
// Save position before reading so we can rewind if it's a clause keyword
|
||||
saved_pos = t.pos
|
||||
|
||||
// Path
|
||||
path_tok, path_ok := next_token(t)
|
||||
if !path_ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if this is actually a new clause keyword (SET/REMOVE/ADD/DELETE)
|
||||
if is_clause_keyword(path_tok) {
|
||||
t.pos = saved_pos
|
||||
return true
|
||||
}
|
||||
|
||||
path, path_resolved := resolve_attribute_name(path_tok, names)
|
||||
if !path_resolved {
|
||||
return false
|
||||
}
|
||||
|
||||
// "="
|
||||
eq_tok, eq_ok := next_token(t)
|
||||
if !eq_ok || eq_tok != "=" {
|
||||
return false
|
||||
}
|
||||
|
||||
// Value expression
|
||||
action, act_ok := parse_set_value_expr(t, path, names, values)
|
||||
if !act_ok {
|
||||
return false
|
||||
}
|
||||
|
||||
append(&plan.sets, action)
|
||||
|
||||
// Check for comma (more entries) or end
|
||||
saved_pos = t.pos
|
||||
comma_maybe := tokenizer_next(t)
|
||||
if comma, has := comma_maybe.?; has {
|
||||
if comma == "," {
|
||||
continue
|
||||
}
|
||||
// Not a comma — put it back
|
||||
t.pos = saved_pos
|
||||
}
|
||||
break
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
parse_set_value_expr :: proc(
|
||||
t: ^Tokenizer,
|
||||
path: string,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (action: Set_Action, ok: bool) {
|
||||
first_tok, first_ok := next_token(t)
|
||||
if !first_ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Check for if_not_exists(...)
|
||||
if strings.equal_fold(first_tok, "if_not_exists") {
|
||||
action, ok = parse_if_not_exists(t, path, names, values)
|
||||
return
|
||||
}
|
||||
|
||||
// Check for list_append(...)
|
||||
if strings.equal_fold(first_tok, "list_append") {
|
||||
action, ok = parse_list_append(t, path, names, values)
|
||||
return
|
||||
}
|
||||
|
||||
peek_pos: int
|
||||
|
||||
// Check if first token is a :placeholder (direct value)
|
||||
if len(first_tok) > 0 && first_tok[0] == ':' {
|
||||
// Could be :val + path or :val - path or just :val
|
||||
peek_pos = t.pos
|
||||
op_maybe := tokenizer_next(t)
|
||||
if op, has_op := op_maybe.?; has_op && (op == "+" || op == "-") {
|
||||
// :val op path
|
||||
second_tok, sec_ok := next_token(t)
|
||||
if !sec_ok {
|
||||
return {}, false
|
||||
}
|
||||
source, source_resolved := resolve_attribute_name(second_tok, names)
|
||||
if !source_resolved {
|
||||
return {}, false
|
||||
}
|
||||
val, val_ok := resolve_attribute_value(first_tok, values)
|
||||
if !val_ok {
|
||||
return {}, false
|
||||
}
|
||||
kind := Set_Value_Kind.Plus if op == "+" else Set_Value_Kind.Minus
|
||||
return Set_Action{
|
||||
path = path,
|
||||
value_kind = kind,
|
||||
value = val,
|
||||
source = source,
|
||||
}, true
|
||||
}
|
||||
// Just a direct value
|
||||
t.pos = peek_pos
|
||||
val, val_ok := resolve_attribute_value(first_tok, values)
|
||||
if !val_ok {
|
||||
return {}, false
|
||||
}
|
||||
return Set_Action{
|
||||
path = path,
|
||||
value_kind = .Direct,
|
||||
value = val,
|
||||
}, true
|
||||
}
|
||||
|
||||
// First token is a path — check for path + :val or path - :val
|
||||
source, source_resolved := resolve_attribute_name(first_tok, names)
|
||||
if !source_resolved {
|
||||
return {}, false
|
||||
}
|
||||
peek_pos = t.pos
|
||||
op_maybe := tokenizer_next(t)
|
||||
if op, has_op := op_maybe.?; has_op && (op == "+" || op == "-") {
|
||||
val_tok, vt_ok := next_token(t)
|
||||
if !vt_ok {
|
||||
return {}, false
|
||||
}
|
||||
val, val_ok := resolve_attribute_value(val_tok, values)
|
||||
if !val_ok {
|
||||
return {}, false
|
||||
}
|
||||
kind := Set_Value_Kind.Plus if op == "+" else Set_Value_Kind.Minus
|
||||
return Set_Action{
|
||||
path = path,
|
||||
value_kind = kind,
|
||||
value = val,
|
||||
source = source,
|
||||
}, true
|
||||
}
|
||||
// A bare path reference (SET a = b) is not supported; rewind and reject
|
||||
t.pos = peek_pos
|
||||
return {}, false
|
||||
}
|
||||
|
||||
parse_if_not_exists :: proc(
|
||||
t: ^Tokenizer,
|
||||
path: string,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (action: Set_Action, ok: bool) {
|
||||
lparen, lp_ok := next_token(t)
|
||||
if !lp_ok || lparen != "(" {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
src_tok, src_ok := next_token(t)
|
||||
if !src_ok {
|
||||
return {}, false
|
||||
}
|
||||
source, source_resolved := resolve_attribute_name(src_tok, names)
|
||||
if !source_resolved {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
comma, comma_ok := next_token(t)
|
||||
if !comma_ok || comma != "," {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
val_tok, vt_ok := next_token(t)
|
||||
if !vt_ok {
|
||||
return {}, false
|
||||
}
|
||||
val, val_ok := resolve_attribute_value(val_tok, values)
|
||||
if !val_ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
rparen, rp_ok := next_token(t)
|
||||
if !rp_ok || rparen != ")" {
|
||||
attr_value_destroy(&val)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
return Set_Action{
|
||||
path = path,
|
||||
value_kind = .If_Not_Exists,
|
||||
value = val,
|
||||
source = source,
|
||||
}, true
|
||||
}
|
||||
|
||||
parse_list_append :: proc(
|
||||
t: ^Tokenizer,
|
||||
path: string,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> (action: Set_Action, ok: bool) {
|
||||
lparen, lp_ok := next_token(t)
|
||||
if !lp_ok || lparen != "(" {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// First operand — could be :val or path
|
||||
first_tok, first_ok := next_token(t)
|
||||
if !first_ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
comma, comma_ok := next_token(t)
|
||||
if !comma_ok || comma != "," {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Second operand
|
||||
second_tok, second_ok := next_token(t)
|
||||
if !second_ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
rparen, rp_ok := next_token(t)
|
||||
if !rp_ok || rparen != ")" {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Determine which is the path and which is the value
|
||||
// Common patterns: list_append(path, :val) or list_append(:val, path)
|
||||
source: string
|
||||
val: Attribute_Value
|
||||
resolved: bool
|
||||
|
||||
if len(first_tok) > 0 && first_tok[0] == ':' {
|
||||
// list_append(:val, path)
|
||||
v, v_ok := resolve_attribute_value(first_tok, values)
|
||||
if !v_ok {
|
||||
return {}, false
|
||||
}
|
||||
val = v
|
||||
source, resolved = resolve_attribute_name(second_tok, names)
|
||||
if !resolved {
|
||||
return {}, false
|
||||
}
|
||||
} else if len(second_tok) > 0 && second_tok[0] == ':' {
|
||||
// list_append(path, :val)
|
||||
source, resolved = resolve_attribute_name(first_tok, names)
|
||||
if !resolved {
|
||||
return {}, false
|
||||
}
|
||||
v, v_ok := resolve_attribute_value(second_tok, values)
|
||||
if !v_ok {
|
||||
return {}, false
|
||||
}
|
||||
val = v
|
||||
} else {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
return Set_Action{
|
||||
path = path,
|
||||
value_kind = .List_Append,
|
||||
value = val,
|
||||
source = source,
|
||||
}, true
|
||||
}
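// Note on operand order: list_append(tags, :more) and list_append(:more, tags)
// both produce the same Set_Action (source = "tags", value = :more) because the
// parser does not record which operand came first, so execute_update_plan always
// appends the value operand after the existing source list.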
|
||||
|
||||
// ============================================================================
|
||||
// REMOVE clause parsing
|
||||
// ============================================================================
|
||||
|
||||
parse_remove_clause :: proc(
|
||||
t: ^Tokenizer,
|
||||
plan: ^Update_Plan,
|
||||
names: Maybe(map[string]string),
|
||||
) -> bool {
|
||||
saved_pos: int
|
||||
for {
|
||||
saved_pos = t.pos
|
||||
|
||||
path_tok, path_ok := next_token(t)
|
||||
if !path_ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if is_clause_keyword(path_tok) {
|
||||
t.pos = saved_pos
|
||||
return true
|
||||
}
|
||||
|
||||
path, path_resolved := resolve_attribute_name(path_tok, names)
|
||||
if !path_resolved {
|
||||
return false
|
||||
}
|
||||
|
||||
append(&plan.removes, Remove_Action{path = path})
|
||||
|
||||
saved_pos = t.pos
|
||||
comma_maybe := tokenizer_next(t)
|
||||
if comma, has := comma_maybe.?; has {
|
||||
if comma == "," {
|
||||
continue
|
||||
}
|
||||
t.pos = saved_pos
|
||||
}
|
||||
break
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// ADD clause parsing
|
||||
// ============================================================================
|
||||
|
||||
parse_add_clause :: proc(
|
||||
t: ^Tokenizer,
|
||||
plan: ^Update_Plan,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> bool {
|
||||
saved_pos: int
|
||||
for {
|
||||
saved_pos = t.pos
|
||||
|
||||
path_tok, path_ok := next_token(t)
|
||||
if !path_ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if is_clause_keyword(path_tok) {
|
||||
t.pos = saved_pos
|
||||
return true
|
||||
}
|
||||
|
||||
path, path_resolved := resolve_attribute_name(path_tok, names)
|
||||
if !path_resolved {
|
||||
return false
|
||||
}
|
||||
|
||||
val_tok, vt_ok := next_token(t)
|
||||
if !vt_ok {
|
||||
return false
|
||||
}
|
||||
val, val_ok := resolve_attribute_value(val_tok, values)
|
||||
if !val_ok {
|
||||
return false
|
||||
}
|
||||
|
||||
append(&plan.adds, Add_Action{path = path, value = val})
|
||||
|
||||
saved_pos = t.pos
|
||||
comma_maybe := tokenizer_next(t)
|
||||
if comma, has := comma_maybe.?; has {
|
||||
if comma == "," {
|
||||
continue
|
||||
}
|
||||
t.pos = saved_pos
|
||||
}
|
||||
break
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// DELETE clause parsing
|
||||
// ============================================================================
|
||||
|
||||
parse_delete_clause :: proc(
|
||||
t: ^Tokenizer,
|
||||
plan: ^Update_Plan,
|
||||
names: Maybe(map[string]string),
|
||||
values: map[string]Attribute_Value,
|
||||
) -> bool {
|
||||
saved_pos: int
|
||||
for {
|
||||
saved_pos = t.pos
|
||||
|
||||
path_tok, path_ok := next_token(t)
|
||||
if !path_ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if is_clause_keyword(path_tok) {
|
||||
t.pos = saved_pos
|
||||
return true
|
||||
}
|
||||
|
||||
path, path_resolved := resolve_attribute_name(path_tok, names)
|
||||
if !path_resolved {
|
||||
return false
|
||||
}
|
||||
|
||||
val_tok, vt_ok := next_token(t)
|
||||
if !vt_ok {
|
||||
return false
|
||||
}
|
||||
val, val_ok := resolve_attribute_value(val_tok, values)
|
||||
if !val_ok {
|
||||
return false
|
||||
}
|
||||
|
||||
append(&plan.deletes, Delete_Action{path = path, value = val})
|
||||
|
||||
saved_pos = t.pos
|
||||
comma_maybe := tokenizer_next(t)
|
||||
if comma, has := comma_maybe.?; has {
|
||||
if comma == "," {
|
||||
continue
|
||||
}
|
||||
t.pos = saved_pos
|
||||
}
|
||||
break
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Helpers
|
||||
// ============================================================================
|
||||
|
||||
is_clause_keyword :: proc(tok: string) -> bool {
|
||||
return strings.equal_fold(tok, "SET") ||
|
||||
strings.equal_fold(tok, "REMOVE") ||
|
||||
strings.equal_fold(tok, "ADD") ||
|
||||
strings.equal_fold(tok, "DELETE")
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Execute Update Plan — apply mutations to an Item (in-place)
|
||||
// ============================================================================
|
||||
|
||||
// Reasons an update plan can fail at execution time.
|
||||
// All of these map to ValidationException at the HTTP layer.
|
||||
Update_Exec_Error :: enum {
|
||||
None,
|
||||
// SET x = source +/- val: source attribute does not exist in the item
|
||||
Operand_Not_Found,
|
||||
// SET x = source +/- val: source or value attribute is not a Number
|
||||
Operand_Not_Number,
|
||||
// SET x = list_append(source, val): source attribute is not a List
|
||||
Operand_Not_List,
|
||||
// ADD path val: existing attribute is not a Number, String_Set, or Number_Set
|
||||
Add_Type_Mismatch,
|
||||
// ADD path val: value type does not match the existing set type
|
||||
Add_Value_Type_Mismatch,
|
||||
}
|
||||
|
||||
execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> Update_Exec_Error {
|
||||
// Execute SET actions
|
||||
for &action in plan.sets {
|
||||
switch action.value_kind {
|
||||
case .Direct:
|
||||
// Remove old value if exists
|
||||
if old, found := item[action.path]; found {
|
||||
old_copy := old
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
}
|
||||
item[strings.clone(action.path)] = attr_value_deep_copy(action.value)
|
||||
|
||||
case .Plus:
|
||||
// Numeric addition: path = source + value or path = value + source
|
||||
existing: Attribute_Value
|
||||
if src, found := item[action.source]; found {
|
||||
existing = src
|
||||
} else {
|
||||
return .Operand_Not_Found
|
||||
}
|
||||
result, add_ok := numeric_add(existing, action.value)
|
||||
if !add_ok {
|
||||
return .Operand_Not_Number
|
||||
}
|
||||
if old, found := item[action.path]; found {
|
||||
old_copy := old
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
}
|
||||
item[strings.clone(action.path)] = result
|
||||
|
||||
case .Minus:
|
||||
// Numeric subtraction: path = source - value
|
||||
existing: Attribute_Value
|
||||
if src, found := item[action.source]; found {
|
||||
existing = src
|
||||
} else {
|
||||
return .Operand_Not_Found
|
||||
}
|
||||
result, sub_ok := numeric_subtract(existing, action.value)
|
||||
if !sub_ok {
|
||||
return .Operand_Not_Number
|
||||
}
|
||||
if old, found := item[action.path]; found {
|
||||
old_copy := old
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
}
|
||||
item[strings.clone(action.path)] = result
|
||||
|
||||
case .If_Not_Exists:
|
||||
// Only set if attribute doesn't exist
|
||||
if _, found := item[action.source]; !found {
|
||||
if old, found2 := item[action.path]; found2 {
|
||||
old_copy := old
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
}
|
||||
item[strings.clone(action.path)] = attr_value_deep_copy(action.value)
|
||||
}
|
||||
// If the source attribute exists, keep the current value (assumes the usual
// SET x = if_not_exists(x, :val) form where path and source name the same attribute)
|
||||
|
||||
case .List_Append:
|
||||
// Append to list
|
||||
existing_list: []Attribute_Value
|
||||
if src, found := item[action.source]; found {
|
||||
if l, is_list := src.(List); is_list {
|
||||
existing_list = ([]Attribute_Value)(l)
|
||||
} else {
|
||||
return .Operand_Not_List
|
||||
}
|
||||
} else {
|
||||
existing_list = {}
|
||||
}
|
||||
|
||||
append_list: []Attribute_Value
|
||||
if l, is_list := action.value.(List); is_list {
|
||||
append_list = ([]Attribute_Value)(l)
|
||||
} else {
|
||||
return .Operand_Not_List
|
||||
}
|
||||
|
||||
new_list := make([]Attribute_Value, len(existing_list) + len(append_list))
|
||||
for item_val, i in existing_list {
|
||||
new_list[i] = attr_value_deep_copy(item_val)
|
||||
}
|
||||
for item_val, i in append_list {
|
||||
new_list[len(existing_list) + i] = attr_value_deep_copy(item_val)
|
||||
}
|
||||
|
||||
if old, found := item[action.path]; found {
|
||||
old_copy := old
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
}
|
||||
item[strings.clone(action.path)] = List(new_list)
|
||||
}
|
||||
}
|
||||
|
||||
// Execute REMOVE actions
|
||||
for &action in plan.removes {
|
||||
if old, found := item[action.path]; found {
|
||||
old_copy := old
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
}
|
||||
}
|
||||
|
||||
// Execute ADD actions
|
||||
for &action in plan.adds {
|
||||
if existing, found := item[action.path]; found {
|
||||
// If existing is a number, add numerically
|
||||
#partial switch v in existing {
|
||||
case DDB_Number:
|
||||
result, add_ok := numeric_add(existing, action.value)
|
||||
if !add_ok {
|
||||
return .Operand_Not_Number
|
||||
}
|
||||
old_copy := existing
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
item[strings.clone(action.path)] = result
|
||||
|
||||
case String_Set:
|
||||
// Add elements to string set
|
||||
if new_ss, is_ss := action.value.(String_Set); is_ss {
|
||||
merged := set_union_strings(([]string)(v), ([]string)(new_ss))
|
||||
old_copy := existing
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
item[strings.clone(action.path)] = String_Set(merged)
|
||||
} else {
|
||||
return .Add_Value_Type_Mismatch
|
||||
}
|
||||
|
||||
case DDB_Number_Set:
|
||||
if new_ns, is_ns := action.value.(DDB_Number_Set); is_ns {
|
||||
merged := set_union_ddb_numbers(([]DDB_Number)(v), ([]DDB_Number)(new_ns))
|
||||
old_copy := existing
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
item[strings.clone(action.path)] = DDB_Number_Set(merged)
|
||||
} else {
|
||||
return .Add_Value_Type_Mismatch
|
||||
}
|
||||
|
||||
case:
|
||||
return .Add_Type_Mismatch
|
||||
}
|
||||
} else {
|
||||
// Attribute doesn't exist — create it
|
||||
item[strings.clone(action.path)] = attr_value_deep_copy(action.value)
|
||||
}
|
||||
}
|
||||
|
||||
// Execute DELETE actions (remove elements from sets)
|
||||
for &action in plan.deletes {
|
||||
if existing, found := item[action.path]; found {
|
||||
#partial switch v in existing {
|
||||
case String_Set:
|
||||
if del_ss, is_ss := action.value.(String_Set); is_ss {
|
||||
result := set_difference_strings(([]string)(v), ([]string)(del_ss))
|
||||
old_copy := existing
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
if len(result) > 0 {
|
||||
item[strings.clone(action.path)] = String_Set(result)
|
||||
} else {
|
||||
delete(result)
|
||||
}
|
||||
}
|
||||
|
||||
case DDB_Number_Set:
|
||||
if del_ns, is_ns := action.value.(DDB_Number_Set); is_ns {
|
||||
result := set_difference_ddb_numbers(([]DDB_Number)(v), ([]DDB_Number)(del_ns))
|
||||
old_copy := existing
|
||||
attr_value_destroy(&old_copy)
|
||||
delete_key(item, action.path)
|
||||
if len(result) > 0 {
|
||||
item[strings.clone(action.path)] = DDB_Number_Set(result)
|
||||
} else {
|
||||
delete(result)
|
||||
}
|
||||
}
|
||||
|
||||
case:
|
||||
// DELETE on non-set type is a no-op in DynamoDB
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return .None
|
||||
}
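// Illustrative walk-through: given an item whose "views" attribute is the number
// "10" and whose "tags" attribute is the string set {"a"}, the plan parsed from
// "SET views = views + :one ADD tags :more" (with :one = "1" and :more = {"b"})
// replaces "views" with "11" via numeric_add on the source attribute and unions
// the string sets so "tags" becomes {"a", "b"} (element order unspecified). Any
// failure returns an Update_Exec_Error that the caller maps to ValidationException.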
|
||||
|
||||
// ============================================================================
|
||||
// Numeric helpers
|
||||
// ============================================================================
|
||||
|
||||
numeric_add :: proc(a: Attribute_Value, b: Attribute_Value) -> (Attribute_Value, bool) {
|
||||
a_num, a_ok := a.(DDB_Number)
|
||||
b_num, b_ok := b.(DDB_Number)
|
||||
if !a_ok || !b_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
result, result_ok := add_ddb_numbers(a_num, b_num)
|
||||
if !result_ok {
|
||||
return nil, false
|
||||
}
|
||||
return result, true
|
||||
}
|
||||
|
||||
numeric_subtract :: proc(a: Attribute_Value, b: Attribute_Value) -> (Attribute_Value, bool) {
|
||||
a_num, a_ok := a.(DDB_Number)
|
||||
b_num, b_ok := b.(DDB_Number)
|
||||
if !a_ok || !b_ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
result, result_ok := subtract_ddb_numbers(a_num, b_num)
|
||||
if !result_ok {
|
||||
return nil, false
|
||||
}
|
||||
return result, true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Set helpers
|
||||
// ============================================================================
|
||||
|
||||
set_union_strings :: proc(a: []string, b: []string) -> []string {
|
||||
seen := make(map[string]bool, allocator = context.temp_allocator)
|
||||
for s in a {
|
||||
seen[s] = true
|
||||
}
|
||||
for s in b {
|
||||
seen[s] = true
|
||||
}
|
||||
|
||||
result := make([]string, len(seen))
|
||||
i := 0
|
||||
for s in seen {
|
||||
result[i] = strings.clone(s)
|
||||
i += 1
|
||||
}
|
||||
return result
|
||||
}
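// Note: Odin map iteration order is unspecified, so the merged set comes back in
// no particular order; DynamoDB string sets are themselves unordered, so callers
// should not rely on element ordering here.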
|
||||
|
||||
set_difference_strings :: proc(a: []string, b: []string) -> []string {
|
||||
to_remove := make(map[string]bool, allocator = context.temp_allocator)
|
||||
for s in b {
|
||||
to_remove[s] = true
|
||||
}
|
||||
|
||||
result := make([dynamic]string)
|
||||
for s in a {
|
||||
if !(s in to_remove) {
|
||||
append(&result, strings.clone(s))
|
||||
}
|
||||
}
|
||||
return result[:]
|
||||
}
|
||||
|
||||
// Union of two DDB_Number slices (dedup by numeric equality)
|
||||
set_union_ddb_numbers :: proc(a: []DDB_Number, b: []DDB_Number) -> []DDB_Number {
|
||||
result := make([dynamic]DDB_Number)
|
||||
|
||||
// Add all from a
|
||||
for num in a {
|
||||
append(&result, clone_ddb_number(num))
|
||||
}
|
||||
|
||||
// Add from b if not already present
|
||||
for num in b {
|
||||
found := false
|
||||
for existing in result {
|
||||
if compare_ddb_numbers(existing, num) == 0 {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
append(&result, clone_ddb_number(num))
|
||||
}
|
||||
}
|
||||
|
||||
return result[:]
|
||||
}
|
||||
|
||||
// Difference: elements in a that are NOT in b
|
||||
set_difference_ddb_numbers :: proc(a: []DDB_Number, b: []DDB_Number) -> []DDB_Number {
|
||||
result := make([dynamic]DDB_Number)
|
||||
|
||||
for num in a {
|
||||
in_b := false
|
||||
for del in b {
|
||||
if compare_ddb_numbers(num, del) == 0 {
|
||||
in_b = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !in_b {
|
||||
append(&result, clone_ddb_number(num))
|
||||
}
|
||||
}
|
||||
|
||||
return result[:]
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Request Parsing Helper
|
||||
// ============================================================================
|
||||
|
||||
parse_update_expression_string :: proc(request_body: []byte) -> (expr: string, ok: bool) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
return
|
||||
}
|
||||
|
||||
ue_val, found := root["UpdateExpression"]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
ue_str, str_ok := ue_val.(json.String)
|
||||
if !str_ok {
|
||||
return
|
||||
}
|
||||
|
||||
expr = strings.clone(string(ue_str))
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
// Parse ReturnValues from request body
|
||||
parse_return_values :: proc(request_body: []byte) -> string {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return strings.clone("NONE")
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
return strings.clone("NONE")
|
||||
}
|
||||
|
||||
rv_val, found := root["ReturnValues"]
|
||||
if !found {
|
||||
return strings.clone("NONE")
|
||||
}
|
||||
|
||||
rv_str, str_ok := rv_val.(json.String)
|
||||
if !str_ok {
|
||||
return strings.clone("NONE")
|
||||
}
|
||||
|
||||
return strings.clone(string(rv_str))
|
||||
}
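// The result is always a cloned string and never an error; "NONE" is returned
// when the field is absent or malformed. Callers compare it against DynamoDB's
// ReturnValues values (NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW).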
|
||||
161
dynamodb/update_item.odin
Normal file
161
dynamodb/update_item.odin
Normal file
@@ -0,0 +1,161 @@
|
||||
package dynamodb
|
||||
|
||||
import "core:strings"
|
||||
import "core:sync"
|
||||
import "../rocksdb"
|
||||
|
||||
// UpdateItem — fetch existing item, apply update plan, write back
|
||||
// Uses EXCLUSIVE lock (write operation)
|
||||
// ATOMICITY: Uses WriteBatch to ensure base item + all GSI updates are atomic
|
||||
//
|
||||
// Returns:
|
||||
// - old_item: the item BEFORE mutations (if it existed), for ReturnValues
|
||||
// - new_item: the item AFTER mutations
|
||||
// - error
|
||||
update_item :: proc(
|
||||
engine: ^Storage_Engine,
|
||||
table_name: string,
|
||||
key_item: Item,
|
||||
plan: ^Update_Plan,
|
||||
) -> (old_item: Maybe(Item), new_item: Maybe(Item), err: Storage_Error) {
|
||||
table_lock := get_or_create_table_lock(engine, table_name)
|
||||
sync.rw_mutex_lock(table_lock)
|
||||
defer sync.rw_mutex_unlock(table_lock)
|
||||
|
||||
// Get table metadata
|
||||
metadata, meta_err := get_table_metadata(engine, table_name)
|
||||
if meta_err != .None {
|
||||
return nil, nil, meta_err
|
||||
}
|
||||
defer table_metadata_destroy(&metadata, engine.allocator)
|
||||
|
||||
// Extract key from the provided key item
|
||||
key_struct, key_ok := key_from_item(key_item, metadata.key_schema)
|
||||
if !key_ok {
|
||||
return nil, nil, .Missing_Key_Attribute
|
||||
}
|
||||
defer key_destroy(&key_struct)
|
||||
|
||||
// Get key values
|
||||
key_values, kv_ok := key_get_values(&key_struct)
|
||||
if !kv_ok {
|
||||
return nil, nil, .Invalid_Key
|
||||
}
|
||||
|
||||
// Build storage key
|
||||
storage_key := build_data_key(table_name, key_values.pk, key_values.sk)
|
||||
defer delete(storage_key)
|
||||
|
||||
// Fetch existing item (if any)
|
||||
existing_encoded, get_err := rocksdb.db_get(&engine.db, storage_key)
|
||||
existing_item: Item
|
||||
|
||||
if get_err == .None && existing_encoded != nil {
|
||||
defer delete(existing_encoded)
|
||||
|
||||
decoded, decode_ok := decode(existing_encoded)
|
||||
if !decode_ok {
|
||||
return nil, nil, .Serialization_Error
|
||||
}
|
||||
existing_item = decoded
|
||||
// Save old item for ReturnValues (and for GSI cleanup)
|
||||
old_item = item_deep_copy(existing_item)
|
||||
} else if get_err == .NotFound || existing_encoded == nil {
|
||||
// Item doesn't exist yet — start with just the key attributes
|
||||
existing_item = make(Item)
|
||||
|
||||
for ks in metadata.key_schema {
|
||||
if val, found := key_item[ks.attribute_name]; found {
|
||||
existing_item[strings.clone(ks.attribute_name)] = attr_value_deep_copy(val)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return nil, nil, .RocksDB_Error
|
||||
}
|
||||
|
||||
// Apply update plan.
|
||||
if exec_err := execute_update_plan(&existing_item, plan); exec_err != .None {
|
||||
item_destroy(&existing_item)
|
||||
if old, has := old_item.?; has {
|
||||
old_copy := old
|
||||
item_destroy(&old_copy)
|
||||
}
|
||||
return nil, nil, .Validation_Error
|
||||
}
|
||||
|
||||
// Validate key attributes are still present and correct type
|
||||
validation_err := validate_item_key_types(
|
||||
existing_item, metadata.key_schema, metadata.attribute_definitions,
|
||||
)
|
||||
if validation_err != .None {
|
||||
item_destroy(&existing_item)
|
||||
if old, has := old_item.?; has {
|
||||
old_copy := old
|
||||
item_destroy(&old_copy)
|
||||
}
|
||||
return nil, nil, validation_err
|
||||
}
|
||||
|
||||
// Encode updated item
|
||||
encoded_item, encode_ok := encode(existing_item)
|
||||
if !encode_ok {
|
||||
item_destroy(&existing_item)
|
||||
if old, has := old_item.?; has {
|
||||
old_copy := old
|
||||
item_destroy(&old_copy)
|
||||
}
|
||||
return nil, nil, .Serialization_Error
|
||||
}
|
||||
defer delete(encoded_item)
|
||||
|
||||
// --- ATOMIC WRITE BATCH: base item + all GSI updates ---
|
||||
batch, batch_err := rocksdb.batch_create()
|
||||
if batch_err != .None {
|
||||
item_destroy(&existing_item)
|
||||
if old, has := old_item.?; has {
|
||||
old_copy := old
|
||||
item_destroy(&old_copy)
|
||||
}
|
||||
return nil, nil, .RocksDB_Error
|
||||
}
|
||||
defer rocksdb.batch_destroy(&batch)
|
||||
|
||||
// Add base item write to batch
|
||||
rocksdb.batch_put(&batch, storage_key, encoded_item)
|
||||
|
||||
// Add old GSI entry deletions to batch (if item existed before)
|
||||
if old, has := old_item.?; has {
|
||||
gsi_del_err := gsi_batch_delete_entries(&batch, table_name, old, &metadata)
|
||||
if gsi_del_err != .None {
|
||||
item_destroy(&existing_item)
|
||||
old_copy := old
|
||||
item_destroy(&old_copy)
|
||||
return nil, nil, gsi_del_err
|
||||
}
|
||||
}
|
||||
|
||||
// Add new GSI entry writes to batch
|
||||
gsi_write_err := gsi_batch_write_entries(&batch, table_name, existing_item, &metadata)
|
||||
if gsi_write_err != .None {
|
||||
item_destroy(&existing_item)
|
||||
if old, has := old_item.?; has {
|
||||
old_copy := old
|
||||
item_destroy(&old_copy)
|
||||
}
|
||||
return nil, nil, gsi_write_err
|
||||
}
|
||||
|
||||
// Write batch atomically - ALL or NOTHING
|
||||
write_err := rocksdb.batch_write(&engine.db, &batch)
|
||||
if write_err != .None {
|
||||
item_destroy(&existing_item)
|
||||
if old, has := old_item.?; has {
|
||||
old_copy := old
|
||||
item_destroy(&old_copy)
|
||||
}
|
||||
return nil, nil, .RocksDB_Error
|
||||
}
|
||||
|
||||
new_item = existing_item
|
||||
return old_item, new_item, .None
|
||||
}
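// Minimal call-site sketch (hypothetical handler wiring; `engine` is assumed to
// be a ^Storage_Engine and the expression inputs are assumed to be decoded from
// the request body):
//
//     plan, _ := parse_update_expression(expr, names, values)
//     defer update_plan_destroy(&plan)
//     old_item, new_item, err := update_item(engine, table_name, key_item, &plan)
//     // serialize old_item or new_item depending on the request's ReturnValues
//     // ("ALL_OLD" -> old_item, "ALL_NEW" -> new_item, "NONE" -> no attributes)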
|
||||
276
gsi_handlers.odin
Normal file
276
gsi_handlers.odin
Normal file
@@ -0,0 +1,276 @@
|
||||
// gsi_handlers.odin — GSI-related HTTP handler helpers
|
||||
//
|
||||
// This file lives in the main package alongside main.odin.
|
||||
// It provides:
|
||||
// 1. parse_global_secondary_indexes — parse GSI definitions from CreateTable request
|
||||
// 2. parse_index_name — extract IndexName from Query/Scan requests
|
||||
// 3. Projection type helper for response building
|
||||
package main
|
||||
|
||||
import "core:encoding/json"
|
||||
import "core:strings"
|
||||
import "dynamodb"
|
||||
|
||||
// ============================================================================
|
||||
// Parse GlobalSecondaryIndexes from CreateTable request body
|
||||
//
|
||||
// DynamoDB CreateTable request format for GSIs:
|
||||
// {
|
||||
// "GlobalSecondaryIndexes": [
|
||||
// {
|
||||
// "IndexName": "email-index",
|
||||
// "KeySchema": [
|
||||
// { "AttributeName": "email", "KeyType": "HASH" },
|
||||
// { "AttributeName": "timestamp", "KeyType": "RANGE" }
|
||||
// ],
|
||||
// "Projection": {
|
||||
// "ProjectionType": "ALL" | "KEYS_ONLY" | "INCLUDE",
|
||||
// "NonKeyAttributes": ["attr1", "attr2"] // only for INCLUDE
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
// }
|
||||
//
|
||||
// Returns nil if no GSI definitions are present (valid — GSIs are optional).
|
||||
// ============================================================================
|
||||
|
||||
parse_global_secondary_indexes :: proc(
|
||||
root: json.Object,
|
||||
attr_defs: []dynamodb.Attribute_Definition,
|
||||
) -> Maybe([]dynamodb.Global_Secondary_Index) {
|
||||
gsi_val, found := root["GlobalSecondaryIndexes"]
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
|
||||
gsi_arr, ok := gsi_val.(json.Array)
|
||||
if !ok || len(gsi_arr) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
gsis := make([]dynamodb.Global_Secondary_Index, len(gsi_arr))
|
||||
|
||||
for elem, i in gsi_arr {
|
||||
elem_obj, elem_ok := elem.(json.Object)
|
||||
if !elem_ok {
|
||||
cleanup_parsed_gsis(gsis[:i])
|
||||
delete(gsis)
|
||||
return nil
|
||||
}
|
||||
|
||||
gsi, gsi_ok := parse_single_gsi(elem_obj, attr_defs)
|
||||
if !gsi_ok {
|
||||
cleanup_parsed_gsis(gsis[:i])
|
||||
delete(gsis)
|
||||
return nil
|
||||
}
|
||||
|
||||
gsis[i] = gsi
|
||||
}
|
||||
|
||||
return gsis
|
||||
}
|
||||
|
||||
@(private = "file")
|
||||
parse_single_gsi :: proc(
|
||||
obj: json.Object,
|
||||
attr_defs: []dynamodb.Attribute_Definition,
|
||||
) -> (dynamodb.Global_Secondary_Index, bool) {
|
||||
gsi: dynamodb.Global_Secondary_Index
|
||||
|
||||
// IndexName (required)
|
||||
idx_val, idx_found := obj["IndexName"]
|
||||
if !idx_found {
|
||||
return {}, false
|
||||
}
|
||||
idx_str, idx_ok := idx_val.(json.String)
|
||||
if !idx_ok {
|
||||
return {}, false
|
||||
}
|
||||
gsi.index_name = strings.clone(string(idx_str))
|
||||
|
||||
// KeySchema (required)
|
||||
ks_val, ks_found := obj["KeySchema"]
|
||||
if !ks_found {
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
ks_arr, ks_ok := ks_val.(json.Array)
|
||||
if !ks_ok || len(ks_arr) == 0 || len(ks_arr) > 2 {
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
key_schema := make([]dynamodb.Key_Schema_Element, len(ks_arr))
|
||||
hash_count := 0
|
||||
|
||||
for ks_elem, j in ks_arr {
|
||||
ks_obj, kobj_ok := ks_elem.(json.Object)
|
||||
if !kobj_ok {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name) }
|
||||
delete(key_schema)
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
an_val, an_found := ks_obj["AttributeName"]
|
||||
if !an_found {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name) }
|
||||
delete(key_schema)
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
an_str, an_ok := an_val.(json.String)
|
||||
if !an_ok {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name) }
|
||||
delete(key_schema)
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
kt_val, kt_found := ks_obj["KeyType"]
|
||||
if !kt_found {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name) }
|
||||
delete(key_schema)
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
kt_str, kt_ok := kt_val.(json.String)
|
||||
if !kt_ok {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name) }
|
||||
delete(key_schema)
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
kt, kt_parse_ok := dynamodb.key_type_from_string(string(kt_str))
|
||||
if !kt_parse_ok {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name) }
|
||||
delete(key_schema)
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
if kt == .HASH {
|
||||
hash_count += 1
|
||||
}
|
||||
|
||||
// Validate that the GSI key attribute is in AttributeDefinitions
|
||||
attr_defined := false
|
||||
for ad in attr_defs {
|
||||
if ad.attribute_name == string(an_str) {
|
||||
attr_defined = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !attr_defined {
|
||||
for k in 0..<j { delete(key_schema[k].attribute_name) }
|
||||
delete(key_schema)
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
key_schema[j] = dynamodb.Key_Schema_Element{
|
||||
attribute_name = strings.clone(string(an_str)),
|
||||
key_type = kt,
|
||||
}
|
||||
}
|
||||
|
||||
// Must have exactly one HASH key
|
||||
if hash_count != 1 {
|
||||
for ks in key_schema { delete(ks.attribute_name) }
|
||||
delete(key_schema)
|
||||
delete(gsi.index_name)
|
||||
return {}, false
|
||||
}
|
||||
|
||||
gsi.key_schema = key_schema
|
||||
|
||||
// Projection (optional — defaults to ALL)
|
||||
gsi.projection.projection_type = .ALL
|
||||
if proj_val, proj_found := obj["Projection"]; proj_found {
|
||||
if proj_obj, proj_ok := proj_val.(json.Object); proj_ok {
|
||||
if pt_val, pt_found := proj_obj["ProjectionType"]; pt_found {
|
||||
if pt_str, pt_ok := pt_val.(json.String); pt_ok {
|
||||
switch string(pt_str) {
|
||||
case "ALL": gsi.projection.projection_type = .ALL
|
||||
case "KEYS_ONLY": gsi.projection.projection_type = .KEYS_ONLY
|
||||
case "INCLUDE": gsi.projection.projection_type = .INCLUDE
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NonKeyAttributes (only valid for INCLUDE projection)
|
||||
if nka_val, nka_found := proj_obj["NonKeyAttributes"]; nka_found {
|
||||
if nka_arr, nka_ok := nka_val.(json.Array); nka_ok && len(nka_arr) > 0 {
|
||||
nka := make([]string, len(nka_arr))
|
||||
for attr_val, k in nka_arr {
|
||||
if attr_str, attr_ok := attr_val.(json.String); attr_ok {
|
||||
nka[k] = strings.clone(string(attr_str))
|
||||
}
|
||||
}
|
||||
gsi.projection.non_key_attributes = nka
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return gsi, true
|
||||
}
|
||||
|
||||
@(private = "file")
|
||||
cleanup_parsed_gsis :: proc(gsis: []dynamodb.Global_Secondary_Index) {
|
||||
for gsi in gsis {
|
||||
delete(gsi.index_name)
|
||||
for ks in gsi.key_schema {
|
||||
delete(ks.attribute_name)
|
||||
}
|
||||
delete(gsi.key_schema)
|
||||
if nka, has_nka := gsi.projection.non_key_attributes.?; has_nka {
|
||||
for attr in nka { delete(attr) }
|
||||
delete(nka)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Parse IndexName from Query/Scan request
|
||||
// ============================================================================
|
||||
|
||||
parse_index_name :: proc(request_body: []byte) -> Maybe(string) {
|
||||
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
|
||||
if parse_err != nil {
|
||||
return nil
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
idx_val, found := root["IndexName"]
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
|
||||
idx_str, ok := idx_val.(json.String)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return strings.clone(string(idx_str))
|
||||
}
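// Usage sketch (hypothetical Query/Scan-handler wiring; `request` is assumed to
// be the ^HTTP_Request passed to the handler):
//
//     if index_name, has_index := parse_index_name(request.body).?; has_index {
//         // route the lookup through the named GSI instead of the base table
//     }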
|
||||
|
||||
// ============================================================================
|
||||
// Projection type to string for DescribeTable response
|
||||
// ============================================================================
|
||||
|
||||
projection_type_to_string :: proc(pt: dynamodb.Projection_Type) -> string {
|
||||
switch pt {
|
||||
case .ALL: return "ALL"
|
||||
case .KEYS_ONLY: return "KEYS_ONLY"
|
||||
case .INCLUDE: return "INCLUDE"
|
||||
}
|
||||
return "ALL"
|
||||
}
|
||||
110
http.odin
110
http.odin
@@ -6,6 +6,7 @@ import vmem "core:mem/virtual"
|
||||
import "core:net"
|
||||
import "core:strings"
|
||||
import "core:strconv"
|
||||
import "core:thread"
|
||||
|
||||
// HTTP Method enumeration
|
||||
HTTP_Method :: enum {
|
||||
@@ -100,9 +101,16 @@ response_set_body :: proc(resp: ^HTTP_Response, data: []byte) {
|
||||
}
|
||||
|
||||
// Request handler function type
|
||||
// Takes context pointer, request, and request-scoped allocator
|
||||
Request_Handler :: #type proc(ctx: rawptr, request: ^HTTP_Request, request_alloc: mem.Allocator) -> HTTP_Response
|
||||
|
||||
// Parse error enum
|
||||
Parse_Error :: enum {
|
||||
None,
|
||||
Connection_Closed,
|
||||
Invalid_Request,
|
||||
Body_Too_Large,
|
||||
}
|
||||
|
||||
// Server configuration
|
||||
Server_Config :: struct {
|
||||
max_body_size: int, // default 100MB
|
||||
@@ -122,6 +130,13 @@ default_server_config :: proc() -> Server_Config {
|
||||
}
|
||||
}
|
||||
|
||||
// Connection task data - passed to worker threads
|
||||
Connection_Task_Data :: struct {
|
||||
server: ^Server,
|
||||
conn: net.TCP_Socket,
|
||||
source: net.Endpoint,
|
||||
}
|
||||
|
||||
// Server
|
||||
Server :: struct {
|
||||
allocator: mem.Allocator,
|
||||
@@ -168,9 +183,12 @@ server_start :: proc(server: ^Server) -> bool {
|
||||
server.socket = socket
|
||||
server.running = true
|
||||
|
||||
fmt.printfln("HTTP server listening on %v", server.endpoint)
|
||||
fmt.printfln("HTTP server listening on %v (thread-per-connection)", server.endpoint)
|
||||
fmt.printfln(" Max body size: %d MB", server.config.max_body_size / (1024 * 1024))
|
||||
fmt.printfln(" Max headers: %d", server.config.max_headers)
|
||||
fmt.printfln(" Keep-alive: %v", server.config.enable_keep_alive)
|
||||
|
||||
// Accept loop
|
||||
// Accept loop - spawn a thread for each connection
|
||||
for server.running {
|
||||
conn, source, accept_err := net.accept_tcp(socket)
|
||||
if accept_err != nil {
|
||||
@@ -180,9 +198,23 @@ server_start :: proc(server: ^Server) -> bool {
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle connection in separate goroutine would go here
|
||||
// For now, handle synchronously (should spawn thread)
|
||||
handle_connection(server, conn, source)
|
||||
// Allocate connection data
|
||||
conn_data := new(Connection_Task_Data, server.allocator)
|
||||
conn_data.server = server
|
||||
conn_data.conn = conn
|
||||
conn_data.source = source
|
||||
|
||||
// Spawn a new thread for this connection
|
||||
t := thread.create(connection_worker_thread)
|
||||
if t != nil {
|
||||
t.init_context = context
|
||||
t.data = conn_data
|
||||
thread.start(t)
|
||||
} else {
|
||||
// Failed to create thread, close connection
|
||||
net.close(conn)
|
||||
free(conn_data, server.allocator)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
@@ -190,12 +222,33 @@ server_start :: proc(server: ^Server) -> bool {
|
||||
|
||||
server_stop :: proc(server: ^Server) {
|
||||
server.running = false
|
||||
|
||||
// Close listening socket
|
||||
if sock, ok := server.socket.?; ok {
|
||||
net.close(sock)
|
||||
server.socket = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Worker thread procedure
|
||||
connection_worker_thread :: proc(t: ^thread.Thread) {
|
||||
defer thread.destroy(t)
|
||||
|
||||
conn_data := cast(^Connection_Task_Data)t.data
|
||||
defer free(conn_data, conn_data.server.allocator)
|
||||
|
||||
handle_connection(conn_data.server, conn_data.conn, conn_data.source)
|
||||
}
|
||||
|
||||
// Create error response
|
||||
make_error_response_simple :: proc(allocator: mem.Allocator, status: HTTP_Status, message: string) -> HTTP_Response {
|
||||
response := response_init(allocator)
|
||||
response_set_status(&response, status)
|
||||
response_add_header(&response, "Content-Type", "text/plain")
|
||||
response_set_body(&response, transmute([]byte)message)
|
||||
return response
|
||||
}
|
||||
|
||||
// Handle a single connection
|
||||
handle_connection :: proc(server: ^Server, conn: net.TCP_Socket, source: net.Endpoint) {
|
||||
defer net.close(conn)
|
||||
@@ -214,13 +267,26 @@ handle_connection :: proc(server: ^Server, conn: net.TCP_Socket, source: net.End
|
||||
|
||||
request_alloc := vmem.arena_allocator(&arena)
|
||||
|
||||
// TODO: Decide whether *all* downstream allocations should use the request arena.
|
||||
// Set request arena as context allocator for downstream allocations
|
||||
old := context.allocator
|
||||
context.allocator = request_alloc
|
||||
defer context.allocator = old
|
||||
|
||||
request, parse_ok := parse_request(conn, request_alloc, server.config)
|
||||
if !parse_ok {
|
||||
request, parse_err := parse_request(conn, request_alloc, server.config)
|
||||
|
||||
// Handle parse errors
|
||||
if parse_err != .None {
|
||||
#partial switch parse_err {
|
||||
case .Body_Too_Large:
|
||||
// Send 413 Payload Too Large
|
||||
response := make_error_response_simple(request_alloc, .Payload_Too_Large,
|
||||
fmt.tprintf("Request body exceeds maximum size of %d bytes", server.config.max_body_size))
|
||||
send_response(conn, &response, request_alloc)
|
||||
case .Invalid_Request:
|
||||
// Send 400 Bad Request
|
||||
response := make_error_response_simple(request_alloc, .Bad_Request, "Invalid HTTP request")
|
||||
send_response(conn, &response, request_alloc)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
@@ -250,13 +316,13 @@ parse_request :: proc(
|
||||
conn: net.TCP_Socket,
|
||||
allocator: mem.Allocator,
|
||||
config: Server_Config,
|
||||
) -> (HTTP_Request, bool) {
|
||||
) -> (HTTP_Request, Parse_Error) {
|
||||
// Read request line and headers
|
||||
buffer := make([]byte, config.read_buffer_size, allocator)
|
||||
|
||||
bytes_read, read_err := net.recv_tcp(conn, buffer)
|
||||
if read_err != nil || bytes_read == 0 {
|
||||
return {}, false
|
||||
return {}, .Connection_Closed
|
||||
}
|
||||
|
||||
request_data := buffer[:bytes_read]
|
||||
@@ -264,7 +330,7 @@ parse_request :: proc(
|
||||
// Find end of headers (\r\n\r\n)
|
||||
header_end_idx := strings.index(string(request_data), "\r\n\r\n")
|
||||
if header_end_idx < 0 {
|
||||
return {}, false
|
||||
return {}, .Invalid_Request
|
||||
}
|
||||
|
||||
header_section := string(request_data[:header_end_idx])
|
||||
@@ -273,13 +339,13 @@ parse_request :: proc(
|
||||
// Parse request line
|
||||
lines := strings.split_lines(header_section, allocator)
|
||||
if len(lines) == 0 {
|
||||
return {}, false
|
||||
return {}, .Invalid_Request
|
||||
}
|
||||
|
||||
request_line := lines[0]
|
||||
parts := strings.split(request_line, " ", allocator)
|
||||
if len(parts) < 3 {
|
||||
return {}, false
|
||||
return {}, .Invalid_Request
|
||||
}
|
||||
|
||||
method := method_from_string(parts[0])
|
||||
@@ -305,6 +371,11 @@ parse_request :: proc(
|
||||
name = strings.clone(name, allocator),
|
||||
value = strings.clone(value, allocator),
|
||||
})
|
||||
|
||||
// Check max headers limit
|
||||
if len(headers) > config.max_headers {
|
||||
return {}, .Invalid_Request
|
||||
}
|
||||
}
|
||||
|
||||
// Read body if Content-Length present
|
||||
@@ -314,7 +385,12 @@ parse_request :: proc(
|
||||
if cl, ok := content_length_header.?; ok {
|
||||
content_length := strconv.parse_int(cl) or_else 0
|
||||
|
||||
if content_length > 0 && content_length <= config.max_body_size {
|
||||
// Check if body size exceeds limit
|
||||
if content_length > config.max_body_size {
|
||||
return {}, .Body_Too_Large
|
||||
}
|
||||
|
||||
if content_length > 0 {
|
||||
// Check if we already have the body in buffer
|
||||
existing_body := request_data[body_start:]
|
||||
|
||||
@@ -336,7 +412,7 @@ parse_request :: proc(
|
||||
|
||||
n, err := net.recv_tcp(conn, chunk)
|
||||
if err != nil || n == 0 {
|
||||
return {}, false
|
||||
return {}, .Connection_Closed
|
||||
}
|
||||
|
||||
copy(body[body_written:], chunk[:n])
|
||||
@@ -352,7 +428,7 @@ parse_request :: proc(
|
||||
path = path,
|
||||
headers = headers[:],
|
||||
body = body,
|
||||
}, true
|
||||
}, .None
|
||||
}
|
||||
|
||||
// Helper to get header from slice
|
||||
|
||||
884
open_api_doc.yaml
Normal file
884
open_api_doc.yaml
Normal file
@@ -0,0 +1,884 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: JormunDB DynamoDB Wire API
|
||||
version: 0.1.0
|
||||
description: |
|
||||
DynamoDB-compatible JSON-over-HTTP API implemented by JormunDB.
|
||||
Requests are POSTed to a single endpoint (/) and routed by the required `X-Amz-Target` header.
|
||||
servers:
|
||||
- url: http://localhost:8002
|
||||
|
||||
paths:
|
||||
/:
|
||||
post:
|
||||
summary: DynamoDB JSON API endpoint
|
||||
description: |
|
||||
Send DynamoDB JSON protocol requests to this endpoint and set `X-Amz-Target` to the operation name,
|
||||
e.g. `DynamoDB_20120810.GetItem`. The request and response media type is typically
|
||||
`application/x-amz-json-1.0`.
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/XAmzTarget'
|
||||
- $ref: '#/components/parameters/XAmzDate'
|
||||
- $ref: '#/components/parameters/Authorization'
|
||||
- $ref: '#/components/parameters/XAmzSecurityToken'
|
||||
- $ref: '#/components/parameters/XAmzContentSha256'
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/x-amz-json-1.0:
|
||||
schema:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/CreateTableRequest'
|
||||
- $ref: '#/components/schemas/DeleteTableRequest'
|
||||
- $ref: '#/components/schemas/DescribeTableRequest'
|
||||
- $ref: '#/components/schemas/ListTablesRequest'
|
||||
- $ref: '#/components/schemas/PutItemRequest'
|
||||
- $ref: '#/components/schemas/GetItemRequest'
|
||||
- $ref: '#/components/schemas/DeleteItemRequest'
|
||||
- $ref: '#/components/schemas/UpdateItemRequest'
|
||||
- $ref: '#/components/schemas/QueryRequest'
|
||||
- $ref: '#/components/schemas/ScanRequest'
|
||||
- $ref: '#/components/schemas/BatchWriteItemRequest'
|
||||
- $ref: '#/components/schemas/BatchGetItemRequest'
|
||||
- $ref: '#/components/schemas/TransactWriteItemsRequest'
|
||||
- $ref: '#/components/schemas/TransactGetItemsRequest'
|
||||
examples:
|
||||
CreateTable:
|
||||
summary: Create a table with a HASH key
|
||||
value:
|
||||
TableName: ExampleTable
|
||||
KeySchema:
|
||||
- AttributeName: pk
|
||||
KeyType: HASH
|
||||
AttributeDefinitions:
|
||||
- AttributeName: pk
|
||||
AttributeType: S
|
||||
responses:
|
||||
'200':
|
||||
description: Successful operation response
|
||||
content:
|
||||
application/x-amz-json-1.0:
|
||||
schema:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/CreateTableResponse'
|
||||
- $ref: '#/components/schemas/DeleteTableResponse'
|
||||
- $ref: '#/components/schemas/DescribeTableResponse'
|
||||
- $ref: '#/components/schemas/ListTablesResponse'
|
||||
- $ref: '#/components/schemas/PutItemResponse'
|
||||
- $ref: '#/components/schemas/GetItemResponseUnion'
|
||||
- $ref: '#/components/schemas/DeleteItemResponse'
|
||||
- $ref: '#/components/schemas/UpdateItemResponseUnion'
|
||||
- $ref: '#/components/schemas/QueryResponse'
|
||||
- $ref: '#/components/schemas/ScanResponse'
|
||||
- $ref: '#/components/schemas/BatchWriteItemResponse'
|
||||
- $ref: '#/components/schemas/BatchGetItemResponse'
|
||||
- $ref: '#/components/schemas/TransactWriteItemsResponse'
|
||||
- $ref: '#/components/schemas/TransactGetItemsResponse'
|
||||
'400':
|
||||
description: Client error (ValidationException, SerializationException, etc.)
|
||||
content:
|
||||
application/x-amz-json-1.0:
|
||||
schema:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/DynamoDbError'
|
||||
- $ref: '#/components/schemas/TransactionCanceledException'
|
||||
'500':
|
||||
description: Server error
|
||||
content:
|
||||
application/x-amz-json-1.0:
|
||||
schema:
|
||||
$ref: '#/components/schemas/DynamoDbError'
|
||||
|
||||
components:
|
||||
parameters:
|
||||
XAmzTarget:
|
||||
name: X-Amz-Target
|
||||
in: header
|
||||
required: true
|
||||
description: |
|
||||
DynamoDB JSON protocol operation selector.
|
||||
JormunDB recognizes targets with the `DynamoDB_20120810.` prefix.
|
||||
Note: `UpdateTable` may be recognized but not implemented.
|
||||
schema:
|
||||
type: string
|
||||
enum:
|
||||
- DynamoDB_20120810.CreateTable
|
||||
- DynamoDB_20120810.DeleteTable
|
||||
- DynamoDB_20120810.DescribeTable
|
||||
- DynamoDB_20120810.ListTables
|
||||
- DynamoDB_20120810.UpdateTable
|
||||
- DynamoDB_20120810.PutItem
|
||||
- DynamoDB_20120810.GetItem
|
||||
- DynamoDB_20120810.DeleteItem
|
||||
- DynamoDB_20120810.UpdateItem
|
||||
- DynamoDB_20120810.Query
|
||||
- DynamoDB_20120810.Scan
|
||||
- DynamoDB_20120810.BatchGetItem
|
||||
- DynamoDB_20120810.BatchWriteItem
|
||||
- DynamoDB_20120810.TransactGetItems
|
||||
- DynamoDB_20120810.TransactWriteItems
|
||||
example: DynamoDB_20120810.GetItem
|
||||
|
||||
XAmzDate:
|
||||
name: X-Amz-Date
|
||||
in: header
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
description: Optional SigV4 timestamp header (kept for SDK compatibility).
|
||||
|
||||
Authorization:
|
||||
name: Authorization
|
||||
in: header
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
description: Optional SigV4 Authorization header (kept for SDK compatibility).
|
||||
|
||||
XAmzSecurityToken:
|
||||
name: X-Amz-Security-Token
|
||||
in: header
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
description: Optional SigV4 session token header (kept for SDK compatibility).
|
||||
|
||||
XAmzContentSha256:
|
||||
name: X-Amz-Content-Sha256
|
||||
in: header
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
description: Optional SigV4 payload hash header (kept for SDK compatibility).
|
||||
|
||||
schemas:
|
||||
EmptyObject:
|
||||
type: object
|
||||
description: Empty JSON object.
|
||||
additionalProperties: false
|
||||
|
||||
# -------------------------
|
||||
# AttributeValue & helpers
|
||||
# -------------------------
|
||||
AttributeValue:
|
||||
description: DynamoDB AttributeValue (JSON wire format).
|
||||
type: object
|
||||
minProperties: 1
|
||||
maxProperties: 1
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/AttrS'
|
||||
- $ref: '#/components/schemas/AttrN'
|
||||
- $ref: '#/components/schemas/AttrB'
|
||||
- $ref: '#/components/schemas/AttrBOOL'
|
||||
- $ref: '#/components/schemas/AttrNULL'
|
||||
- $ref: '#/components/schemas/AttrSS'
|
||||
- $ref: '#/components/schemas/AttrNS'
|
||||
- $ref: '#/components/schemas/AttrBS'
|
||||
- $ref: '#/components/schemas/AttrL'
|
||||
- $ref: '#/components/schemas/AttrM'
|
||||
|
||||
AttrS:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [S]
|
||||
properties:
|
||||
S:
|
||||
type: string
|
||||
example: hello
|
||||
|
||||
AttrN:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [N]
|
||||
properties:
|
||||
N:
|
||||
type: string
|
||||
description: Numeric values are encoded as strings in DynamoDB's JSON protocol.
|
||||
example: "42"
|
||||
|
||||
AttrB:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [B]
|
||||
properties:
|
||||
B:
|
||||
type: string
|
||||
description: Base64-encoded binary value.
|
||||
example: AAECAwQ=
|
||||
|
||||
AttrBOOL:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [BOOL]
|
||||
properties:
|
||||
BOOL:
|
||||
type: boolean
|
||||
example: true
|
||||
|
||||
AttrNULL:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [NULL]
|
||||
properties:
|
||||
NULL:
|
||||
type: boolean
|
||||
enum: [true]
|
||||
example: true
|
||||
|
||||
AttrSS:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [SS]
|
||||
properties:
|
||||
SS:
|
||||
type: array
|
||||
items: { type: string }
|
||||
example: [a, b]
|
||||
|
||||
AttrNS:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [NS]
|
||||
properties:
|
||||
NS:
|
||||
type: array
|
||||
description: Numeric set values are encoded as strings.
|
||||
items: { type: string }
|
||||
example: ["1", "2"]
|
||||
|
||||
AttrBS:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [BS]
|
||||
properties:
|
||||
BS:
|
||||
type: array
|
||||
description: Base64-encoded binary set values.
|
||||
items: { type: string }
|
||||
example: [AAE=, AgM=]
|
||||
|
||||
AttrL:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [L]
|
||||
properties:
|
||||
L:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/AttributeValue'
|
||||
|
||||
AttrM:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [M]
|
||||
properties:
|
||||
M:
|
||||
$ref: '#/components/schemas/AttributeMap'
|
||||
|
||||
AttributeMap:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/AttributeValue'
|
||||
example:
|
||||
pk: { S: "user#1" }
|
||||
sk: { S: "meta" }
|
||||
age: { N: "30" }
|
||||
|
||||
ExpressionAttributeNames:
|
||||
type: object
|
||||
additionalProperties: { type: string }
|
||||
example:
|
||||
"#pk": "pk"
|
||||
|
||||
ExpressionAttributeValues:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/AttributeValue'
|
||||
example:
|
||||
":v": { S: "user#1" }
|
||||
|
||||
Key:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/AttributeMap'
|
||||
description: Primary key map (HASH, optionally RANGE) encoded as an AttributeMap.
|
||||
|
||||
ReturnValues:
|
||||
type: string
|
||||
description: ReturnValues selector used by UpdateItem.
|
||||
enum: [NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW]
|
||||
example: ALL_NEW
|
||||
|
||||
# -------------------------
|
||||
# Table shapes
|
||||
# -------------------------
|
||||
ScalarAttributeType:
|
||||
type: string
|
||||
enum: [S, N, B]
|
||||
example: S
|
||||
|
||||
AttributeDefinition:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [AttributeName, AttributeType]
|
||||
properties:
|
||||
AttributeName: { type: string }
|
||||
AttributeType: { $ref: '#/components/schemas/ScalarAttributeType' }
|
||||
|
||||
KeyType:
|
||||
type: string
|
||||
enum: [HASH, RANGE]
|
||||
example: HASH
|
||||
|
||||
KeySchemaElement:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [AttributeName, KeyType]
|
||||
properties:
|
||||
AttributeName: { type: string }
|
||||
KeyType: { $ref: '#/components/schemas/KeyType' }
|
||||
|
||||
ProjectionType:
|
||||
type: string
|
||||
enum: [KEYS_ONLY, INCLUDE, ALL]
|
||||
example: ALL
|
||||
|
||||
Projection:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [ProjectionType]
|
||||
properties:
|
||||
ProjectionType: { $ref: '#/components/schemas/ProjectionType' }
|
||||
NonKeyAttributes:
|
||||
type: array
|
||||
items: { type: string }
|
||||
|
||||
GlobalSecondaryIndex:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [IndexName, KeySchema, Projection]
|
||||
properties:
|
||||
IndexName: { type: string }
|
||||
KeySchema:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/KeySchemaElement' }
|
||||
minItems: 1
|
||||
Projection: { $ref: '#/components/schemas/Projection' }
|
||||
|
||||
TableStatus:
|
||||
type: string
|
||||
enum: [CREATING, UPDATING, DELETING, ACTIVE, ARCHIVING, ARCHIVED]
|
||||
example: ACTIVE
|
||||
|
||||
TableDescription:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [TableName, TableStatus]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
TableStatus: { $ref: '#/components/schemas/TableStatus' }
|
||||
CreationDateTime:
|
||||
type: integer
|
||||
format: int64
|
||||
description: Unix epoch seconds.
|
||||
KeySchema:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/KeySchemaElement' }
|
||||
AttributeDefinitions:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/AttributeDefinition' }
|
||||
GlobalSecondaryIndexes:
|
||||
type: array
|
||||
items:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/GlobalSecondaryIndex'
|
||||
- type: object
|
||||
properties:
|
||||
IndexStatus:
|
||||
type: string
|
||||
enum: [ACTIVE]
|
||||
|
||||
# -------------------------
|
||||
# Error shapes
|
||||
# -------------------------
|
||||
DynamoDbError:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [__type, message]
|
||||
properties:
|
||||
__type:
|
||||
type: string
|
||||
description: DynamoDB error type identifier.
|
||||
example: com.amazonaws.dynamodb.v20120810#ValidationException
|
||||
message:
|
||||
type: string
|
||||
example: Invalid request
|
||||
|
||||
TransactionCanceledException:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [__type, message, CancellationReasons]
|
||||
properties:
|
||||
__type:
|
||||
type: string
|
||||
enum: [com.amazonaws.dynamodb.v20120810#TransactionCanceledException]
|
||||
message:
|
||||
type: string
|
||||
CancellationReasons:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Code, Message]
|
||||
properties:
|
||||
Code: { type: string, example: ConditionalCheckFailed }
|
||||
Message: { type: string, example: The conditional request failed }
|
||||
|
||||
# -------------------------
|
||||
# API: CreateTable
|
||||
# -------------------------
|
||||
CreateTableRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, KeySchema, AttributeDefinitions]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
KeySchema:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/KeySchemaElement' }
|
||||
minItems: 1
|
||||
AttributeDefinitions:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/AttributeDefinition' }
|
||||
minItems: 1
|
||||
GlobalSecondaryIndexes:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/GlobalSecondaryIndex' }
|
||||
description: |
|
||||
CreateTable request. JormunDB focuses on TableName, KeySchema, AttributeDefinitions, and optional GSI definitions.
|
||||
|
||||
CreateTableResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [TableDescription]
|
||||
properties:
|
||||
TableDescription:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [TableName, TableStatus, CreationDateTime]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
TableStatus: { $ref: '#/components/schemas/TableStatus' }
|
||||
CreationDateTime: { type: integer, format: int64 }
|
||||
|
||||
# -------------------------
|
||||
# API: DeleteTable / DescribeTable / ListTables
|
||||
# -------------------------
|
||||
DeleteTableRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
|
||||
DeleteTableResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [TableDescription]
|
||||
properties:
|
||||
TableDescription:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [TableName, TableStatus]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
TableStatus:
|
||||
type: string
|
||||
enum: [DELETING]
|
||||
|
||||
DescribeTableRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
|
||||
DescribeTableResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Table]
|
||||
properties:
|
||||
Table: { $ref: '#/components/schemas/TableDescription' }
|
||||
|
||||
ListTablesRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
description: ListTables request. JormunDB ignores request fields for this operation.
|
||||
|
||||
ListTablesResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [TableNames]
|
||||
properties:
|
||||
TableNames:
|
||||
type: array
|
||||
items: { type: string }
|
||||
|
||||
# -------------------------
|
||||
# API: PutItem / GetItem / DeleteItem
|
||||
# -------------------------
|
||||
PutItemRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Item]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Item: { $ref: '#/components/schemas/AttributeMap' }
|
||||
ConditionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
|
||||
PutItemResponse:
|
||||
$ref: '#/components/schemas/EmptyObject'
|
||||
|
||||
GetItemRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Key]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Key: { $ref: '#/components/schemas/Key' }
|
||||
ProjectionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
|
||||
GetItemResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Item]
|
||||
properties:
|
||||
Item: { $ref: '#/components/schemas/AttributeMap' }
|
||||
|
||||
GetItemResponseUnion:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/EmptyObject'
|
||||
- $ref: '#/components/schemas/GetItemResponse'
|
||||
|
||||
DeleteItemRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Key]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Key: { $ref: '#/components/schemas/Key' }
|
||||
ConditionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
|
||||
DeleteItemResponse:
|
||||
$ref: '#/components/schemas/EmptyObject'
|
||||
|
||||
# -------------------------
|
||||
# API: UpdateItem
|
||||
# -------------------------
|
||||
UpdateItemRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Key, UpdateExpression]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Key: { $ref: '#/components/schemas/Key' }
|
||||
UpdateExpression: { type: string }
|
||||
ConditionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
ReturnValues: { $ref: '#/components/schemas/ReturnValues' }
|
||||
|
||||
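# For illustration only (table and attribute names are made up), a request this
# schema is meant to accept could look like:
#   {
#     "TableName": "users",
#     "Key": { "pk": { "S": "user#1" }, "sk": { "S": "meta" } },
#     "UpdateExpression": "SET #age = :a",
#     "ExpressionAttributeNames": { "#age": "age" },
#     "ExpressionAttributeValues": { ":a": { "N": "31" } },
#     "ReturnValues": "UPDATED_NEW"
#   }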
UpdateItemResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Attributes]
|
||||
properties:
|
||||
Attributes: { $ref: '#/components/schemas/AttributeMap' }
|
||||
|
||||
UpdateItemResponseUnion:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/EmptyObject'
|
||||
- $ref: '#/components/schemas/UpdateItemResponse'
|
||||
|
||||
# -------------------------
|
||||
# API: Query / Scan
|
||||
# -------------------------
|
||||
QueryRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, KeyConditionExpression]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
IndexName: { type: string }
|
||||
KeyConditionExpression: { type: string }
|
||||
FilterExpression: { type: string }
|
||||
ProjectionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
Limit:
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 1
|
||||
description: Maximum number of items to return; JormunDB defaults to 100 when this is omitted or 0.
|
||||
ExclusiveStartKey: { $ref: '#/components/schemas/Key' }
|
||||
ScanIndexForward:
|
||||
type: boolean
|
||||
description: Sort order when the key schema has a RANGE key; true (the default) sorts ascending, false sorts descending.
|
||||
|
||||
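# For illustration only (names and values are made up), a Query request matching
# this schema could look like:
#   {
#     "TableName": "users",
#     "KeyConditionExpression": "#pk = :v",
#     "ExpressionAttributeNames": { "#pk": "pk" },
#     "ExpressionAttributeValues": { ":v": { "S": "user#1" } },
#     "Limit": 25
#   }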
ScanRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
IndexName: { type: string }
|
||||
FilterExpression: { type: string }
|
||||
ProjectionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
Limit:
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 1
|
||||
description: Maximum number of items to return; JormunDB defaults to 100 when this is omitted or 0.
|
||||
ExclusiveStartKey: { $ref: '#/components/schemas/Key' }
|
||||
|
||||
ItemsPage:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Items, Count, ScannedCount]
|
||||
properties:
|
||||
Items:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/AttributeMap' }
|
||||
Count:
|
||||
type: integer
|
||||
format: int32
|
||||
ScannedCount:
|
||||
type: integer
|
||||
format: int32
|
||||
LastEvaluatedKey:
|
||||
$ref: '#/components/schemas/Key'
|
||||
|
||||
QueryResponse:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/ItemsPage'
|
||||
|
||||
ScanResponse:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/ItemsPage'
|
||||
|
||||
# -------------------------
|
||||
# API: BatchWriteItem
|
||||
# -------------------------
|
||||
WriteRequest:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
properties:
|
||||
PutRequest:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Item]
|
||||
properties:
|
||||
Item: { $ref: '#/components/schemas/AttributeMap' }
|
||||
DeleteRequest:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Key]
|
||||
properties:
|
||||
Key: { $ref: '#/components/schemas/Key' }
|
||||
oneOf:
|
||||
- required: [PutRequest]
|
||||
- required: [DeleteRequest]
|
||||
|
||||
BatchWriteItemRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [RequestItems]
|
||||
properties:
|
||||
RequestItems:
|
||||
type: object
|
||||
description: Map of table name to write requests.
|
||||
additionalProperties:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/WriteRequest' }
|
||||
|
||||
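# For illustration only (table name and items are made up), the RequestItems map
# in a BatchWriteItemRequest could look like:
#   {
#     "users": [
#       { "PutRequest": { "Item": { "pk": { "S": "user#2" } } } },
#       { "DeleteRequest": { "Key": { "pk": { "S": "user#1" } } } }
#     ]
#   }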
BatchWriteItemResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [UnprocessedItems]
|
||||
properties:
|
||||
UnprocessedItems:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/WriteRequest' }
|
||||
|
||||
# -------------------------
|
||||
# API: BatchGetItem
|
||||
# -------------------------
|
||||
KeysAndAttributes:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [Keys]
|
||||
properties:
|
||||
Keys:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/Key' }
|
||||
ProjectionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
|
||||
BatchGetItemRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [RequestItems]
|
||||
properties:
|
||||
RequestItems:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/KeysAndAttributes'
|
||||
|
||||
BatchGetItemResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Responses, UnprocessedKeys]
|
||||
properties:
|
||||
Responses:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/AttributeMap' }
|
||||
UnprocessedKeys:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/KeysAndAttributes'
|
||||
|
||||
# -------------------------
|
||||
# API: TransactWriteItems / TransactGetItems
|
||||
# -------------------------
|
||||
TransactWriteItemsRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TransactItems]
|
||||
properties:
|
||||
TransactItems:
|
||||
type: array
|
||||
minItems: 1
|
||||
maxItems: 100
|
||||
items:
|
||||
$ref: '#/components/schemas/TransactWriteItem'
|
||||
|
||||
TransactWriteItem:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
oneOf:
|
||||
- required: [Put]
|
||||
- required: [Delete]
|
||||
- required: [Update]
|
||||
- required: [ConditionCheck]
|
||||
properties:
|
||||
Put:
|
||||
$ref: '#/components/schemas/TransactPut'
|
||||
Delete:
|
||||
$ref: '#/components/schemas/TransactDelete'
|
||||
Update:
|
||||
$ref: '#/components/schemas/TransactUpdate'
|
||||
ConditionCheck:
|
||||
$ref: '#/components/schemas/TransactConditionCheck'
|
||||
|
||||
TransactPut:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Item]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Item: { $ref: '#/components/schemas/AttributeMap' }
|
||||
ConditionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
|
||||
TransactDelete:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Key]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Key: { $ref: '#/components/schemas/Key' }
|
||||
ConditionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
|
||||
TransactUpdate:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Key, UpdateExpression]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Key: { $ref: '#/components/schemas/Key' }
|
||||
UpdateExpression: { type: string }
|
||||
ConditionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
|
||||
TransactConditionCheck:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Key, ConditionExpression]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Key: { $ref: '#/components/schemas/Key' }
|
||||
ConditionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
ExpressionAttributeValues: { $ref: '#/components/schemas/ExpressionAttributeValues' }
|
||||
|
||||
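# For illustration only (table and attribute names are made up), TransactItems in
# a TransactWriteItemsRequest combines the action shapes above, e.g.:
#   [
#     { "ConditionCheck": { "TableName": "users", "Key": { "pk": { "S": "user#1" } },
#                           "ConditionExpression": "attribute_exists(pk)" } },
#     { "Put": { "TableName": "orders", "Item": { "pk": { "S": "order#9" } } } }
#   ]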
TransactWriteItemsResponse:
|
||||
$ref: '#/components/schemas/EmptyObject'
|
||||
|
||||
TransactGetItemsRequest:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TransactItems]
|
||||
properties:
|
||||
TransactItems:
|
||||
type: array
|
||||
minItems: 1
|
||||
maxItems: 100
|
||||
items:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Get]
|
||||
properties:
|
||||
Get:
|
||||
$ref: '#/components/schemas/TransactGet'
|
||||
|
||||
TransactGet:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
required: [TableName, Key]
|
||||
properties:
|
||||
TableName: { type: string }
|
||||
Key: { $ref: '#/components/schemas/Key' }
|
||||
ProjectionExpression: { type: string }
|
||||
ExpressionAttributeNames: { $ref: '#/components/schemas/ExpressionAttributeNames' }
|
||||
|
||||
TransactGetItemResult:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/EmptyObject'
|
||||
- type: object
|
||||
additionalProperties: false
|
||||
required: [Item]
|
||||
properties:
|
||||
Item: { $ref: '#/components/schemas/AttributeMap' }
|
||||
|
||||
TransactGetItemsResponse:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required: [Responses]
|
||||
properties:
|
||||
Responses:
|
||||
type: array
|
||||
items: { $ref: '#/components/schemas/TransactGetItemResult' }
|
||||
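Stepping back from the spec for a moment: the AttributeValue oneOf and the Attr* shapes defined at the top of this file map naturally onto a tagged union on the server side. The sketch below is a minimal Odin illustration of that idea; it is not the actual `dynamodb.Attribute_Value` definition used by JormunDB, and the type names are made up.

```odin
package example

// Illustrative only; binary (B), number-set (NS), and binary-set (BS) variants
// would follow the same pattern as the ones shown here.
Attribute_Value_Sketch :: union {
	Attr_S,
	Attr_N,
	Attr_BOOL,
	Attr_NULL,
	Attr_SS,
	Attr_L,
	Attr_M,
}

Attr_S    :: distinct string   // "S": UTF-8 string
Attr_N    :: distinct string   // "N": number, kept in its wire string encoding
Attr_BOOL :: distinct bool     // "BOOL"
Attr_NULL :: struct { is_null: bool }                              // "NULL": always true on the wire
Attr_SS   :: distinct []string                                     // "SS": string set
Attr_L    :: struct { elems: []Attribute_Value_Sketch }            // "L": nested list
Attr_M    :: struct { fields: map[string]Attribute_Value_Sketch }  // "M": nested map
```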
3337
project_context.txt
3337
project_context.txt
File diff suppressed because it is too large
@@ -1,22 +1,167 @@
// TODO: In order to use RocksDB's WAL replication helpers, we need to import the C++ library so we use this shim
/**
C++ shim implementation notes (the important bits)

In this rocksdb_shim.cc we'll need to use:

rocksdb::DB::Open(...)
db->GetLatestSequenceNumber()
db->GetUpdatesSince(seq, &iter)

from each TransactionLogIterator entry:
get WriteBatch and serialize via WriteBatch::Data()
apply via rocksdb::WriteBatch wb(data); db->Write(write_options, &wb);

Also we must configure WAL retention so the followers don’t fall off the end. RocksDB warns the iterator can become invalid if WAL is cleared aggressively; typical controls are WAL TTL / size limit.

https://github.com/facebook/rocksdb/issues/1565
*/

#include "rocksdb_shim.h"
#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/slice.h>
#include <cstring>
#include <string>

// Internal structure wrapping rocksdb::DB
struct jormun_db {
    rocksdb::DB* db;
};

// Placeholder for WAL iterator (not implemented yet)
struct jormun_wal_iter {
    // TODO: Implement with TransactionLogIterator when needed
    void* placeholder;
};

// Open database
jormun_db* jormun_db_open(const char* path, int create_if_missing, char** err) {
    rocksdb::Options options;
    options.create_if_missing = create_if_missing != 0;

    rocksdb::DB* db_ptr = nullptr;
    rocksdb::Status status = rocksdb::DB::Open(options, path, &db_ptr);

    if (!status.ok()) {
        if (err) {
            std::string error_msg = status.ToString();
            *err = strdup(error_msg.c_str());
        }
        return nullptr;
    }

    jormun_db* jdb = new jormun_db;
    jdb->db = db_ptr;
    return jdb;
}

// Close database
void jormun_db_close(jormun_db* db) {
    if (db) {
        delete db->db;
        delete db;
    }
}

// Put key-value pair
void jormun_db_put(jormun_db* db,
                   const void* key, size_t keylen,
                   const void* val, size_t vallen,
                   char** err) {
    if (!db || !db->db) {
        if (err) *err = strdup("Database is null");
        return;
    }

    rocksdb::WriteOptions write_options;
    rocksdb::Slice key_slice(static_cast<const char*>(key), keylen);
    rocksdb::Slice val_slice(static_cast<const char*>(val), vallen);

    rocksdb::Status status = db->db->Put(write_options, key_slice, val_slice);

    if (!status.ok() && err) {
        std::string error_msg = status.ToString();
        *err = strdup(error_msg.c_str());
    }
}

// Get value for key
unsigned char* jormun_db_get(jormun_db* db,
                             const void* key, size_t keylen,
                             size_t* vallen,
                             char** err) {
    if (!db || !db->db) {
        if (err) *err = strdup("Database is null");
        return nullptr;
    }

    rocksdb::ReadOptions read_options;
    rocksdb::Slice key_slice(static_cast<const char*>(key), keylen);
    std::string value;

    rocksdb::Status status = db->db->Get(read_options, key_slice, &value);

    if (status.IsNotFound()) {
        *vallen = 0;
        return nullptr;
    }

    if (!status.ok()) {
        if (err) {
            std::string error_msg = status.ToString();
            *err = strdup(error_msg.c_str());
        }
        return nullptr;
    }

    // Allocate and copy value
    *vallen = value.size();
    unsigned char* result = static_cast<unsigned char*>(malloc(value.size()));
    if (result) {
        memcpy(result, value.data(), value.size());
    }

    return result;
}

// Free memory allocated by the shim
void jormun_free(void* p) {
    free(p);
}

// ============================================================================
// WAL Replication Functions (Stubs for now - to be implemented)
// ============================================================================

// Get latest sequence number
uint64_t jormun_latest_sequence(jormun_db* db) {
    if (!db || !db->db) return 0;
    return db->db->GetLatestSequenceNumber();
}

// Create WAL iterator (stub)
jormun_wal_iter* jormun_wal_iter_create(jormun_db* db, uint64_t seq, char** err) {
    (void)db;
    (void)seq;
    if (err) {
        *err = strdup("WAL iteration not yet implemented");
    }
    return nullptr;
}

// Destroy WAL iterator (stub)
void jormun_wal_iter_destroy(jormun_wal_iter* it) {
    if (it) {
        delete it;
    }
}

// Get next batch from WAL (stub)
int jormun_wal_iter_next(jormun_wal_iter* it,
                         uint64_t* batch_start_seq,
                         unsigned char** out_data,
                         size_t* out_len,
                         char** err) {
    (void)it;
    (void)batch_start_seq;
    (void)out_data;
    (void)out_len;
    if (err) {
        *err = strdup("WAL iteration not yet implemented");
    }
    return 0;
}

// Apply write batch (stub)
void jormun_apply_writebatch(jormun_db* db,
                             const unsigned char* data, size_t len,
                             char** err) {
    (void)db;
    (void)data;
    (void)len;
    if (err) {
        *err = strdup("WAL apply not yet implemented");
    }
}
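For orientation, here is a hedged sketch of what the Odin side of this shim boundary could look like. The function names and C signatures are taken from the shim above, but the package name, the foreign import path, and the exact Odin parameter types are assumptions, not the project's actual bindings.

```odin
package dynamodb_shim_sketch

import "core:c"

// Assumed link name; the real build links against the compiled rocksdb_shim object.
foreign import shim "system:jormun_rocksdb_shim"

@(default_calling_convention = "c")
foreign shim {
	jormun_db_open         :: proc(path: cstring, create_if_missing: c.int, err: ^cstring) -> rawptr ---
	jormun_db_close        :: proc(db: rawptr) ---
	jormun_db_put          :: proc(db: rawptr, key: rawptr, keylen: c.size_t, val: rawptr, vallen: c.size_t, err: ^cstring) ---
	jormun_db_get          :: proc(db: rawptr, key: rawptr, keylen: c.size_t, vallen: ^c.size_t, err: ^cstring) -> [^]u8 ---
	jormun_free            :: proc(p: rawptr) ---
	jormun_latest_sequence :: proc(db: rawptr) -> u64 ---
}
```

The `char** err` out-parameter on the C side maps to `^cstring` here; whoever receives a non-nil error string (or a value buffer from `jormun_db_get`) is responsible for handing it back to `jormun_free`.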
599
transact_handlers.odin
Normal file
599
transact_handlers.odin
Normal file
@@ -0,0 +1,599 @@
|
||||
// transact_handlers.odin — HTTP handlers for TransactWriteItems and TransactGetItems
|
||||
//
|
||||
// Also contains the UPDATED_NEW / UPDATED_OLD filtering helper for UpdateItem.
|
||||
package main
|
||||
|
||||
import "core:encoding/json"
|
||||
import "core:fmt"
|
||||
import "core:strings"
|
||||
import "dynamodb"
|
||||
|
||||
// ============================================================================
|
||||
// TransactWriteItems Handler
|
||||
//
|
||||
// Request format:
|
||||
// {
|
||||
// "TransactItems": [
|
||||
// {
|
||||
// "Put": {
|
||||
// "TableName": "...",
|
||||
// "Item": { ... },
|
||||
// "ConditionExpression": "...", // optional
|
||||
// "ExpressionAttributeNames": { ... }, // optional
|
||||
// "ExpressionAttributeValues": { ... } // optional
|
||||
// }
|
||||
// },
|
||||
// {
|
||||
// "Delete": {
|
||||
// "TableName": "...",
|
||||
// "Key": { ... },
|
||||
// "ConditionExpression": "...", // optional
|
||||
// ...
|
||||
// }
|
||||
// },
|
||||
// {
|
||||
// "Update": {
|
||||
// "TableName": "...",
|
||||
// "Key": { ... },
|
||||
// "UpdateExpression": "...",
|
||||
// "ConditionExpression": "...", // optional
|
||||
// "ExpressionAttributeNames": { ... }, // optional
|
||||
// "ExpressionAttributeValues": { ... } // optional
|
||||
// }
|
||||
// },
|
||||
// {
|
||||
// "ConditionCheck": {
|
||||
// "TableName": "...",
|
||||
// "Key": { ... },
|
||||
// "ConditionExpression": "...",
|
||||
// "ExpressionAttributeNames": { ... }, // optional
|
||||
// "ExpressionAttributeValues": { ... } // optional
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
// }
|
||||
// ============================================================================
|
||||
|
||||
handle_transact_write_items :: proc(
|
||||
engine: ^dynamodb.Storage_Engine,
|
||||
request: ^HTTP_Request,
|
||||
response: ^HTTP_Response,
|
||||
) {
|
||||
data, parse_err := json.parse(request.body, allocator = context.allocator)
|
||||
if parse_err != nil {
|
||||
make_error_response(response, .SerializationException, "Invalid JSON")
|
||||
return
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
make_error_response(response, .SerializationException, "Request must be an object")
|
||||
return
|
||||
}
|
||||
|
||||
transact_items_val, found := root["TransactItems"]
|
||||
if !found {
|
||||
make_error_response(response, .ValidationException, "Missing TransactItems")
|
||||
return
|
||||
}
|
||||
|
||||
transact_items, ti_ok := transact_items_val.(json.Array)
|
||||
if !ti_ok {
|
||||
make_error_response(response, .ValidationException, "TransactItems must be an array")
|
||||
return
|
||||
}
|
||||
|
||||
if len(transact_items) == 0 {
|
||||
make_error_response(response, .ValidationException,
|
||||
"TransactItems must contain at least one item")
|
||||
return
|
||||
}
|
||||
|
||||
if len(transact_items) > 100 {
|
||||
make_error_response(response, .ValidationException,
|
||||
"Member must have length less than or equal to 100")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse each action
|
||||
actions := make([dynamic]dynamodb.Transact_Write_Action)
|
||||
defer {
|
||||
for &action in actions {
|
||||
dynamodb.transact_write_action_destroy(&action)
|
||||
}
|
||||
delete(actions)
|
||||
}
|
||||
|
||||
for elem in transact_items {
|
||||
elem_obj, elem_ok := elem.(json.Object)
|
||||
if !elem_ok {
|
||||
make_error_response(response, .ValidationException,
|
||||
"Each TransactItem must be an object")
|
||||
return
|
||||
}
|
||||
|
||||
action, action_ok := parse_transact_write_action(elem_obj)
|
||||
if !action_ok {
|
||||
make_error_response(response, .ValidationException,
|
||||
"Invalid TransactItem action")
|
||||
return
|
||||
}
|
||||
append(&actions, action)
|
||||
}
|
||||
|
||||
// Execute transaction
|
||||
result, tx_err := dynamodb.transact_write_items(engine, actions[:])
|
||||
defer dynamodb.transact_write_result_destroy(&result)
|
||||
|
||||
switch tx_err {
|
||||
case .None:
|
||||
response_set_body(response, transmute([]byte)string("{}"))
|
||||
|
||||
case .Cancelled:
|
||||
// Build TransactionCanceledException response
|
||||
builder := strings.builder_make()
|
||||
strings.write_string(&builder, `{"__type":"com.amazonaws.dynamodb.v20120810#TransactionCanceledException","message":"Transaction cancelled, please refer cancellation reasons for specific reasons [`)
|
||||
|
||||
for reason, i in result.cancellation_reasons {
|
||||
if i > 0 {
|
||||
strings.write_string(&builder, ", ")
|
||||
}
|
||||
strings.write_string(&builder, reason.code)
|
||||
}
|
||||
|
||||
strings.write_string(&builder, `]","CancellationReasons":[`)
|
||||
|
||||
for reason, i in result.cancellation_reasons {
|
||||
if i > 0 {
|
||||
strings.write_string(&builder, ",")
|
||||
}
|
||||
fmt.sbprintf(&builder, `{{"Code":"%s","Message":"%s"}}`, reason.code, reason.message)
|
||||
}
|
||||
|
||||
strings.write_string(&builder, "]}")
|
||||
|
||||
response_set_status(response, .Bad_Request)
|
||||
resp_body := strings.to_string(builder)
|
||||
response_set_body(response, transmute([]byte)resp_body)
|
||||
|
||||
case .Validation_Error:
|
||||
make_error_response(response, .ValidationException,
|
||||
"Transaction validation failed")
|
||||
|
||||
case .Internal_Error:
|
||||
make_error_response(response, .InternalServerError,
|
||||
"Internal error during transaction")
|
||||
}
|
||||
}
|
||||
|
||||
// Parse a single TransactItem action from JSON
|
||||
@(private = "file")
|
||||
parse_transact_write_action :: proc(obj: json.Object) -> (dynamodb.Transact_Write_Action, bool) {
|
||||
action: dynamodb.Transact_Write_Action
|
||||
action.expr_attr_values = make(map[string]dynamodb.Attribute_Value)
|
||||
|
||||
// Try Put
|
||||
if put_val, has_put := obj["Put"]; has_put {
|
||||
put_obj, put_ok := put_val.(json.Object)
|
||||
if !put_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.type = .Put
|
||||
return parse_transact_put_action(put_obj, &action)
|
||||
}
|
||||
|
||||
// Try Delete
|
||||
if del_val, has_del := obj["Delete"]; has_del {
|
||||
del_obj, del_ok := del_val.(json.Object)
|
||||
if !del_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.type = .Delete
|
||||
return parse_transact_key_action(del_obj, &action)
|
||||
}
|
||||
|
||||
// Try Update
|
||||
if upd_val, has_upd := obj["Update"]; has_upd {
|
||||
upd_obj, upd_ok := upd_val.(json.Object)
|
||||
if !upd_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.type = .Update
|
||||
return parse_transact_update_action(upd_obj, &action)
|
||||
}
|
||||
|
||||
// Try ConditionCheck
|
||||
if cc_val, has_cc := obj["ConditionCheck"]; has_cc {
|
||||
cc_obj, cc_ok := cc_val.(json.Object)
|
||||
if !cc_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.type = .Condition_Check
|
||||
return parse_transact_key_action(cc_obj, &action)
|
||||
}
|
||||
|
||||
return {}, false
|
||||
}
|
||||
|
||||
// Parse common expression fields from a transact action object
|
||||
@(private = "file")
|
||||
parse_transact_expression_fields :: proc(obj: json.Object, action: ^dynamodb.Transact_Write_Action) {
|
||||
// ConditionExpression
|
||||
if ce_val, found := obj["ConditionExpression"]; found {
|
||||
if ce_str, str_ok := ce_val.(json.String); str_ok {
|
||||
action.condition_expr = strings.clone(string(ce_str))
|
||||
}
|
||||
}
|
||||
|
||||
// ExpressionAttributeNames
|
||||
if ean_val, found := obj["ExpressionAttributeNames"]; found {
|
||||
if ean_obj, ean_ok := ean_val.(json.Object); ean_ok {
|
||||
names := make(map[string]string)
|
||||
for key, val in ean_obj {
|
||||
if str, str_ok := val.(json.String); str_ok {
|
||||
names[strings.clone(key)] = strings.clone(string(str))
|
||||
}
|
||||
}
|
||||
action.expr_attr_names = names
|
||||
}
|
||||
}
|
||||
|
||||
// ExpressionAttributeValues
|
||||
if eav_val, found := obj["ExpressionAttributeValues"]; found {
|
||||
if eav_obj, eav_ok := eav_val.(json.Object); eav_ok {
|
||||
for key, val in eav_obj {
|
||||
attr, attr_ok := dynamodb.parse_attribute_value(val)
|
||||
if attr_ok {
|
||||
action.expr_attr_values[strings.clone(key)] = attr
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse a Put transact action
|
||||
@(private = "file")
|
||||
parse_transact_put_action :: proc(
|
||||
obj: json.Object,
|
||||
action: ^dynamodb.Transact_Write_Action,
|
||||
) -> (dynamodb.Transact_Write_Action, bool) {
|
||||
// TableName
|
||||
tn_val, tn_found := obj["TableName"]
|
||||
if !tn_found {
|
||||
return {}, false
|
||||
}
|
||||
tn_str, tn_ok := tn_val.(json.String)
|
||||
if !tn_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.table_name = strings.clone(string(tn_str))
|
||||
|
||||
// Item
|
||||
item_val, item_found := obj["Item"]
|
||||
if !item_found {
|
||||
return {}, false
|
||||
}
|
||||
item, item_ok := dynamodb.parse_item_from_value(item_val)
|
||||
if !item_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.item = item
|
||||
|
||||
// Expression fields
|
||||
parse_transact_expression_fields(obj, action)
|
||||
|
||||
return action^, true
|
||||
}
|
||||
|
||||
// Parse a Delete or ConditionCheck transact action (both use Key)
|
||||
@(private = "file")
|
||||
parse_transact_key_action :: proc(
|
||||
obj: json.Object,
|
||||
action: ^dynamodb.Transact_Write_Action,
|
||||
) -> (dynamodb.Transact_Write_Action, bool) {
|
||||
// TableName
|
||||
tn_val, tn_found := obj["TableName"]
|
||||
if !tn_found {
|
||||
return {}, false
|
||||
}
|
||||
tn_str, tn_ok := tn_val.(json.String)
|
||||
if !tn_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.table_name = strings.clone(string(tn_str))
|
||||
|
||||
// Key
|
||||
key_val, key_found := obj["Key"]
|
||||
if !key_found {
|
||||
return {}, false
|
||||
}
|
||||
key, key_ok := dynamodb.parse_item_from_value(key_val)
|
||||
if !key_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.key = key
|
||||
|
||||
// Expression fields
|
||||
parse_transact_expression_fields(obj, action)
|
||||
|
||||
return action^, true
|
||||
}
|
||||
|
||||
// Parse an Update transact action
|
||||
@(private = "file")
|
||||
parse_transact_update_action :: proc(
|
||||
obj: json.Object,
|
||||
action: ^dynamodb.Transact_Write_Action,
|
||||
) -> (dynamodb.Transact_Write_Action, bool) {
|
||||
// TableName
|
||||
tn_val, tn_found := obj["TableName"]
|
||||
if !tn_found {
|
||||
return {}, false
|
||||
}
|
||||
tn_str, tn_ok := tn_val.(json.String)
|
||||
if !tn_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.table_name = strings.clone(string(tn_str))
|
||||
|
||||
// Key
|
||||
key_val, key_found := obj["Key"]
|
||||
if !key_found {
|
||||
return {}, false
|
||||
}
|
||||
key, key_ok := dynamodb.parse_item_from_value(key_val)
|
||||
if !key_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.key = key
|
||||
|
||||
// Expression fields (must be parsed before UpdateExpression so attr values are available)
|
||||
parse_transact_expression_fields(obj, action)
|
||||
|
||||
// UpdateExpression
|
||||
ue_val, ue_found := obj["UpdateExpression"]
|
||||
if !ue_found {
|
||||
return {}, false
|
||||
}
|
||||
ue_str, ue_ok := ue_val.(json.String)
|
||||
if !ue_ok {
|
||||
return {}, false
|
||||
}
|
||||
|
||||
plan, plan_ok := dynamodb.parse_update_expression(
|
||||
string(ue_str), action.expr_attr_names, action.expr_attr_values,
|
||||
)
|
||||
if !plan_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.update_plan = plan
|
||||
|
||||
return action^, true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// TransactGetItems Handler
|
||||
//
|
||||
// Request format:
|
||||
// {
|
||||
// "TransactItems": [
|
||||
// {
|
||||
// "Get": {
|
||||
// "TableName": "...",
|
||||
// "Key": { ... },
|
||||
// "ProjectionExpression": "...", // optional
|
||||
// "ExpressionAttributeNames": { ... } // optional
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
// }
|
||||
// ============================================================================
|
||||
|
||||
handle_transact_get_items :: proc(
|
||||
engine: ^dynamodb.Storage_Engine,
|
||||
request: ^HTTP_Request,
|
||||
response: ^HTTP_Response,
|
||||
) {
|
||||
data, parse_err := json.parse(request.body, allocator = context.allocator)
|
||||
if parse_err != nil {
|
||||
make_error_response(response, .SerializationException, "Invalid JSON")
|
||||
return
|
||||
}
|
||||
defer json.destroy_value(data)
|
||||
|
||||
root, root_ok := data.(json.Object)
|
||||
if !root_ok {
|
||||
make_error_response(response, .SerializationException, "Request must be an object")
|
||||
return
|
||||
}
|
||||
|
||||
transact_items_val, found := root["TransactItems"]
|
||||
if !found {
|
||||
make_error_response(response, .ValidationException, "Missing TransactItems")
|
||||
return
|
||||
}
|
||||
|
||||
transact_items, ti_ok := transact_items_val.(json.Array)
|
||||
if !ti_ok {
|
||||
make_error_response(response, .ValidationException, "TransactItems must be an array")
|
||||
return
|
||||
}
|
||||
|
||||
if len(transact_items) == 0 {
|
||||
make_error_response(response, .ValidationException,
|
||||
"TransactItems must contain at least one item")
|
||||
return
|
||||
}
|
||||
|
||||
if len(transact_items) > 100 {
|
||||
make_error_response(response, .ValidationException,
|
||||
"Member must have length less than or equal to 100")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse each get action
|
||||
actions := make([dynamic]dynamodb.Transact_Get_Action)
|
||||
defer {
|
||||
for &action in actions {
|
||||
dynamodb.transact_get_action_destroy(&action)
|
||||
}
|
||||
delete(actions)
|
||||
}
|
||||
|
||||
for elem in transact_items {
|
||||
elem_obj, elem_ok := elem.(json.Object)
|
||||
if !elem_ok {
|
||||
make_error_response(response, .ValidationException,
|
||||
"Each TransactItem must be an object")
|
||||
return
|
||||
}
|
||||
|
||||
get_val, has_get := elem_obj["Get"]
|
||||
if !has_get {
|
||||
make_error_response(response, .ValidationException,
|
||||
"TransactGetItems only supports Get actions")
|
||||
return
|
||||
}
|
||||
|
||||
get_obj, get_ok := get_val.(json.Object)
|
||||
if !get_ok {
|
||||
make_error_response(response, .ValidationException,
|
||||
"Get action must be an object")
|
||||
return
|
||||
}
|
||||
|
||||
action, action_ok := parse_transact_get_action(get_obj)
|
||||
if !action_ok {
|
||||
make_error_response(response, .ValidationException,
|
||||
"Invalid Get action")
|
||||
return
|
||||
}
|
||||
append(&actions, action)
|
||||
}
|
||||
|
||||
// Execute transaction get
|
||||
result, tx_err := dynamodb.transact_get_items(engine, actions[:])
|
||||
defer dynamodb.transact_get_result_destroy(&result)
|
||||
|
||||
if tx_err != .None {
|
||||
make_error_response(response, .InternalServerError,
|
||||
"Transaction get failed")
|
||||
return
|
||||
}
|
||||
|
||||
// Build response
|
||||
builder := strings.builder_make(context.allocator)
|
||||
defer strings.builder_destroy(&builder)
|
||||
|
||||
strings.write_string(&builder, `{"Responses":[`)
|
||||
|
||||
for maybe_item, i in result.items {
|
||||
if i > 0 {
|
||||
strings.write_string(&builder, ",")
|
||||
}
|
||||
|
||||
if item, has_item := maybe_item.?; has_item {
|
||||
strings.write_string(&builder, `{"Item":`)
|
||||
dynamodb.serialize_item_to_builder(&builder, item)
|
||||
strings.write_string(&builder, `}`)
|
||||
} else {
|
||||
strings.write_string(&builder, "{}")
|
||||
}
|
||||
}
|
||||
|
||||
strings.write_string(&builder, "]}")
|
||||
|
||||
// Clone the string: the deferred builder_destroy above frees the builder's buffer, so the response must own its own copy of the body
|
||||
resp_body := strings.clone(strings.to_string(builder))
|
||||
response_set_body(response, transmute([]byte)resp_body)
|
||||
}
|
||||
|
||||
// Parse a single TransactGetItems Get action
|
||||
@(private = "file")
|
||||
parse_transact_get_action :: proc(obj: json.Object) -> (dynamodb.Transact_Get_Action, bool) {
|
||||
action: dynamodb.Transact_Get_Action
|
||||
|
||||
// TableName
|
||||
tn_val, tn_found := obj["TableName"]
|
||||
if !tn_found {
|
||||
return {}, false
|
||||
}
|
||||
tn_str, tn_ok := tn_val.(json.String)
|
||||
if !tn_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.table_name = strings.clone(string(tn_str))
|
||||
|
||||
// Key
|
||||
key_val, key_found := obj["Key"]
|
||||
if !key_found {
|
||||
return {}, false
|
||||
}
|
||||
key, key_ok := dynamodb.parse_item_from_value(key_val)
|
||||
if !key_ok {
|
||||
return {}, false
|
||||
}
|
||||
action.key = key
|
||||
|
||||
// ProjectionExpression (optional)
|
||||
if pe_val, pe_found := obj["ProjectionExpression"]; pe_found {
|
||||
if pe_str, pe_ok := pe_val.(json.String); pe_ok {
|
||||
// Parse ExpressionAttributeNames for projection
|
||||
attr_names: Maybe(map[string]string) = nil
|
||||
if ean_val, ean_found := obj["ExpressionAttributeNames"]; ean_found {
|
||||
if ean_obj, ean_ok := ean_val.(json.Object); ean_ok {
|
||||
names := make(map[string]string, allocator = context.temp_allocator)
|
||||
for key_str, val in ean_obj {
|
||||
if str, str_ok := val.(json.String); str_ok {
|
||||
names[key_str] = string(str)
|
||||
}
|
||||
}
|
||||
attr_names = names
|
||||
}
|
||||
}
|
||||
|
||||
parts := strings.split(string(pe_str), ",")
|
||||
paths := make([dynamic]string)
|
||||
for part in parts {
|
||||
trimmed := strings.trim_space(part)
|
||||
if len(trimmed) == 0 {
|
||||
continue
|
||||
}
|
||||
resolved, res_ok := dynamodb.resolve_attribute_name(trimmed, attr_names)
|
||||
if !res_ok {
|
||||
delete(paths)
|
||||
dynamodb.item_destroy(&action.key)
|
||||
return {}, false
|
||||
}
|
||||
append(&paths, strings.clone(resolved))
|
||||
}
|
||||
action.projection = paths[:]
|
||||
}
|
||||
}
|
||||
|
||||
return action, true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// UPDATED_NEW / UPDATED_OLD Filtering Helper
|
||||
//
|
||||
// DynamoDB ReturnValues semantics:
|
||||
// ALL_NEW → all attributes of the item after the update
|
||||
// ALL_OLD → all attributes of the item before the update
|
||||
// UPDATED_NEW → only the attributes that were modified, with new values
|
||||
// UPDATED_OLD → only the attributes that were modified, with old values
|
||||
//
|
||||
// This filters an item to only include the attributes touched by the
|
||||
// UpdateExpression (the "modified paths").
|
||||
// ============================================================================
|
||||
|
||||
filter_updated_attributes :: proc(
|
||||
item: dynamodb.Item,
|
||||
plan: ^dynamodb.Update_Plan,
|
||||
) -> dynamodb.Item {
|
||||
modified_paths := dynamodb.get_update_plan_modified_paths(plan)
|
||||
defer delete(modified_paths)
|
||||
|
||||
return dynamodb.filter_item_to_paths(item, modified_paths)
|
||||
}
|
||||
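To show where the helper above slots in, here is a hedged sketch of how an UpdateItem handler might choose the Attributes payload based on ReturnValues. The call to filter_updated_attributes and the ReturnValues semantics come from the file above; the surrounding procedure and parameter names are assumptions, not the actual handler code.

```odin
// Sketch only: not the actual UpdateItem handler.
select_return_attributes :: proc(
	old_item, new_item: dynamodb.Item,
	plan: ^dynamodb.Update_Plan,
	return_values: string,
) -> (attrs: dynamodb.Item, include: bool) {
	switch return_values {
	case "ALL_NEW":     return new_item, true
	case "ALL_OLD":     return old_item, true
	case "UPDATED_NEW": return filter_updated_attributes(new_item, plan), true
	case "UPDATED_OLD": return filter_updated_attributes(old_item, plan), true
	}
	return // "NONE" or unset: respond with {} and no Attributes field
}
```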