remove the verbose logging stuff. clean up README and comments from the weird nonsense LLMs added

This commit is contained in:
2026-03-07 17:08:26 -05:00
parent 6450f905c3
commit 29136a3740
21 changed files with 131 additions and 154 deletions

View File

@@ -57,6 +57,5 @@ VOLUME ["/data"]
ENV JORMUN_HOST=0.0.0.0
ENV JORMUN_PORT=8002
ENV JORMUN_DATA_DIR=/data
ENV JORMUN_VERBOSE=0
ENTRYPOINT ["./jormundb"]

View File

@@ -53,7 +53,6 @@ EXTRA_LINKER_FLAGS = $(LIB_PATH) $(SHIM_LIB) $(ROCKSDB_LIBS)
PORT ?= 8002
HOST ?= 0.0.0.0
DATA_DIR ?= ./data
VERBOSE ?= 0
# Colors for output
BLUE := \033[0;34m
@@ -62,7 +61,7 @@ YELLOW := \033[0;33m
RED := \033[0;31m
NC := \033[0m # No Color
# To this use a sentinel file instead of the dir name
# To this use a sentinel file instead of the dir name
$(BUILD_DIR)/.dir:
@mkdir -p $(BUILD_DIR)
@touch $(BUILD_DIR)/.dir
@@ -101,7 +100,6 @@ run: build
@JORMUN_PORT=$(PORT) \
JORMUN_HOST=$(HOST) \
JORMUN_DATA_DIR=$(DATA_DIR) \
JORMUN_VERBOSE=$(VERBOSE) \
$(BUILD_DIR)/$(PROJECT_NAME)
# Run with custom port
@@ -209,9 +207,7 @@ help:
@echo " PORT=$(PORT) - Server port"
@echo " HOST=$(HOST) - Bind address"
@echo " DATA_DIR=$(DATA_DIR) - RocksDB data directory"
@echo " VERBOSE=$(VERBOSE) - Enable verbose logging (0/1)"
@echo ""
@echo "$(GREEN)Examples:$(NC)"
@echo " make run PORT=9000"
@echo " make run DATA_DIR=/tmp/jormun VERBOSE=1"
@echo " make dev"

View File

@@ -133,12 +133,6 @@ make run PORT=9000
# Custom data directory
make run DATA_DIR=/tmp/jormun
# Enable verbose logging
make run VERBOSE=1
# Combine options
make run PORT=9000 DATA_DIR=/var/jormun VERBOSE=1
```
### Environment Variables

View File

@@ -14,9 +14,8 @@
## What is JormunDB?
JormunDB is a Self-Hosted DynamoDB replacement that speaks the DynamoDB wire protocol. Point your AWS SDK or CLI at it and use it as a drop-in replacement.
**Why Odin?** The original Zig implementation suffered from explicit allocator threading, where every function ended up needing an `allocator` parameter and every allocation needed `errdefer` cleanup. Odin's implicit context allocator system eliminates this ceremony. Just one `context.allocator = arena_allocator` at the request handler entry, and it feels more like working with ctx in Go instead of filling out tax forms.
JormunDB is an obnoxiously fast, single-tenant DynamoDB replacement designed for teams who need something under their control.
It speaks the DynamoDB wire protocol, so any existing AWS SDK client works against it without modification.
## Features
@@ -154,12 +153,11 @@ handle_request :: proc(conn: net.TCP_Socket) {
context.allocator = mem.arena_allocator(&arena)
// Everything below uses the arena automatically
// No manual frees, no errdefer cleanup needed
request := parse_request() // Uses context.allocator
response := process(request) // Uses context.allocator
send_response(response) // Uses context.allocator
// Arena is freed here automatically
// Arena is freed automatically with some exceptions
}
```
@@ -184,7 +182,7 @@ make fmt
make clean
# Run with custom settings
make run PORT=9000 DATA_DIR=/tmp/db VERBOSE=1
make run PORT=9000 DATA_DIR=/tmp/db
```
## Performance
@@ -246,6 +244,8 @@ Benchmarked on single node localhost, 1000 iterations per test.
- ⏳ UpdateItem (works but needs UPDATED_NEW/UPDATED_OLD response filtering to work for full Dynamo Parity)
- ⏳ Local Secondary Indexes
- ⏳ Read-Only replicas via WAL
- ⏳ A "Rebuild Index" Tool
## Configuration
@@ -255,68 +255,62 @@ Benchmarked on single node localhost, 1000 iterations per test.
JORMUN_PORT=8002 # Server port (I have something locally on port 8000 so now everyone has to use port 8002)
JORMUN_HOST=0.0.0.0 # Bind address
JORMUN_DATA_DIR=./data # RocksDB data directory
JORMUN_VERBOSE=1 # Enable verbose logging
```
### Command Line Arguments
```bash
./jormundb --port 9000 --host 127.0.0.1 --data-dir /var/db --verbose
./jormundb --port 9000 --host 127.0.0.1 --data-dir /var/db
```
## Troubleshooting
## Design Philosophy
### "Cannot open RocksDB"
**One instance, one application.** JormunDB is intentionally single-tenant. Rather than running one massive shared database for all your applications, the model is to spin up a dedicated JormunDB instance per application. Instances are cheap. Coordination is not. You can run multiple tables per instance if you want but that's not what it's optimized for, and it's not a replacement for a full RDBMS. There's no query optimizer, no planner, none of the real crazy stuff that PostgreSQL or real DynamoDB has built up over years to solve specific bottlenecks. It's a fast, durable key-value store that speaks a protocol you already know.
Ensure RocksDB libraries are installed and the data directory is writable:
**Local disk is fast.** RocksDB is exceptional at squeezing performance out of local storage. JormunDB is designed to run on bare metal with NVMe drives, or on VMs where the disk is local at the host level. Networked storage probably won't see much benefit from Jormun. The closer the data is to the CPU, the better.
```bash
# Check RocksDB installation
pkg-config --libs rocksdb
**Minimal config, maximal durability and portability.** RocksDB handles durability. JormunDB handles the DynamoDB protocol. Compiles to a single binary.
# Check permissions
mkdir -p ./data
chmod 755 ./data
```
**It's an excellent write-through cache.** The original use case was a write-through cache sitting in front of a production RDBMS. If you already have the AWS SDK in your stack for S3 or other services, you have a DynamoDB client sitting there doing nothing. Put it to work.
### "Connection refused"
**HTTPS is optional and handled externally.** If you need TLS, put Caddy in front of it. JormunDB is designed to run inside a VPC with no public IP. The optional access key check is not a cryptographic auth mechanism.
Check if the port is already in use:
---
```bash
lsof -i :8002
```
## Why Not Just Use DynamoDB Local?
### "Invalid JSON" errors
DynamoDB Local is a development tool and not built for production workloads. Under the hood it's a Java application backed by SQLite where every operation goes through two layers of translation: DynamoDB semantics get mapped into SQL, SQLite executes it as a relational query, and the result gets mapped back. Serializing a key-value workload into a relational engine and then back out again was what I was trying to avoid in the first place.
Ensure you're using the correct DynamoDB JSON format:
RocksDB is a native LSM-tree key-value store so it's architecturally closer to what DynamoDB actually is under the hood. JormunDB skips the translation entirely. The data path is the Request, Odin, RocksDB, and Disk.
---
## Why Odin?
This project went through a few languages before landing on Odin, and the journey is worth explaining.
**C++** was the obvious choice but the ecosystem overhead wasn't worth it for a focused project like this.
**Rust** came next, as is trendy. The problem, I learned, is that a database is an imperative, stateful thing that needs mutation and memory management. Rust's functional programming model fights this at every turn, and binding RocksDB without sprinkling `unsafe` everywhere turned out to be essentially impossible.
**Zig** got further than anything else, and the original implementation was solid. The dealbreaker was allocator threading: every function that allocates needs an explicit `allocator` parameter, and every allocation needs `errdefer` cleanup. For a request-handling database, this meant every function in the call stack had an allocator bolted on. It works, but it's ceremonious in a way that compounds and becomes fatiguing.
**Odin** solved the exact problem Zig introduced. Odin has an implicit context system where the allocator lives in a thread-local context object. Set `context.allocator = request_arena` once at the top of your request handler, and every allocation downstream automatically uses the arena. It feels closer to working with `ctx` in Go instead of filling out tax forms. The entire request lifetime is managed by a single growing arena that gets thrown away when the request completes.
That model is a natural fit for a request-handling server, and it's a big part of why JormunDB is as fast as it is.
## Note:
Complex auth systems tend to become an attack surface. Issues like leaked keys, token forgery, and privilege escalation are problems that come with the complexity of a robust auth implementation rather than the underlying data layer. I did not want to deal with multi-tenant credential stores, token refresh logic, and signature verification edge cases. I focused on doing one job well so I can leverage that strength. More robust auth will come in the future. For now, simple is safe.
```json
{
"TableName": "Users",
"Item": {
"id": {"S": "user123"},
"age": {"N": "30"}
}
}
```
## Credits
- Inspired by DynamoDB
- Built with [Odin](https://odin-lang.org/)
- Powered by [RocksDB](https://rocksdb.org/)
- Originally implemented as ZynamoDB in Zig
## Contributing
Contributions welcome! Please:
1. Format code with `make fmt`
2. Run tests with `make test`
3. Update documentation as needed
4. Follow Odin idioms (context allocators, explicit returns, etc.)
---
**Why "Jormun"?** Jörmungandr, the World Serpent from Norse mythology, which I found fitting for something built in a language called Odin. Also, it sounds cool.
**Why "Jormun"?** Jörmungandr, the World Serpent from Norse mythology, which I found fitting for something built in a language called Odin. Also, it sounds cool. Think of deploying a "Jormun Cluster" — it just rolls off the tongue and sounds crazy.

26
TODO.md
View File

@@ -1,4 +1,4 @@
# JormunDB (Odin rewrite) TODO
# JormunDB (Odin rewrite) TODO
This tracks what's left to stabilize + extend the project
@@ -8,34 +8,34 @@ Goal: "aws cli works reliably for CreateTable/ListTables/PutItem/GetItem/DeleteI
### 1) HTTP + routing hardening
- [ ] Audit request parsing boundaries:
- Max body size enforcement **DONE**
- Max body size enforcement **DONE**
- Missing/invalid headers → correct DynamoDB error types
- Content-Type handling (be permissive but consistent)
- [x] Ensure **all request-scoped allocations** come from the request arena (no accidental long-lived allocs)
- Verified: `handle_connection` in http.odin sets `context.allocator = request_alloc`
- Long-lived data (table metadata, locks) explicitly uses `engine.allocator`
- [x] Standardize error responses:
- `__type` formatting done, uses `com.amazonaws.dynamodb.v20120810#ErrorType`
- `message` field consistency done
- Status code mapping per error type **DONE**: centralized `handle_storage_error` + `make_error_response` now maps InternalServerError→500, everything else→400
- `__type` formatting done, uses `com.amazonaws.dynamodb.v20120810#ErrorType`
- `message` field consistency done
- Status code mapping per error type **DONE**: centralized `handle_storage_error` + `make_error_response` now maps InternalServerError→500, everything else→400
- Missing X-Amz-Target now returns `SerializationException` (matches real DynamoDB)
### 2) Storage correctness edge cases
- [x] Table metadata durability + validation:
- [x] Reject duplicate tables done in `create_table` (checks existing meta key)
- [x] Reject invalid key schema done in `parse_key_schema` (no HASH, multiple HASH, etc.)
- [x] Reject duplicate tables done in `create_table` (checks existing meta key)
- [x] Reject invalid key schema done in `parse_key_schema` (no HASH, multiple HASH, etc.)
- [x] Item validation against key schema:
- [x] Missing PK/SK errors done in `key_from_item`
- [x] Type mismatch errors (S/N/B) **DONE**: new `validate_item_key_types` proc checks item key attr types against AttributeDefinitions
- [x] Missing PK/SK errors done in `key_from_item`
- [x] Type mismatch errors (S/N/B) **DONE**: new `validate_item_key_types` proc checks item key attr types against AttributeDefinitions
- [ ] Deterministic encoding tests:
- [ ] Key codec round-trip
- [ ] TLV item encode/decode round-trip (nested maps/lists/sets)
### 3) Query/Scan pagination parity
- [x] Make pagination behavior match AWS CLI expectations:
- [x] `Limit` done
- [x] `ExclusiveStartKey` done (parsed via JSON object lookup with key schema type reconstruction)
- [x] `LastEvaluatedKey` generation **FIXED**: now saves key of *last returned item* (not next unread item); only emits when more results exist
- [x] `Limit` done
- [x] `ExclusiveStartKey` done (parsed via JSON object lookup with key schema type reconstruction)
- [x] `LastEvaluatedKey` generation **FIXED**: now saves key of *last returned item* (not next unread item); only emits when more results exist
- [ ] Add "golden" pagination tests:
- [ ] Query w/ sort key ranges
- [ ] Scan limit + resume loop
@@ -45,7 +45,7 @@ Goal: "aws cli works reliably for CreateTable/ListTables/PutItem/GetItem/DeleteI
- **DONE**: `parse_key_condition_expression_string` uses JSON object lookup (handles whitespace/ordering safely)
- [ ] Add validation + better errors for malformed expressions
- [x] Expand operator coverage: BETWEEN and begins_with are implemented in parser
- [x] **Sort key condition filtering in query** **DONE**: `query()` now accepts optional `Sort_Key_Condition` and applies it (=, <, <=, >, >=, BETWEEN, begins_with)
- [x] **Sort key condition filtering in query** **DONE**: `query()` now accepts optional `Sort_Key_Condition` and applies it (=, <, <=, >, >=, BETWEEN, begins_with)
### 5) Service Features
- [ ] Configuration settings like environment variables for defining users and credentials

View File

@@ -1,5 +1,7 @@
#!/bin/bash
# This file is because I am barbaric and don't use things like claude code but when I want to leverage an LLM, I can run this, it concats the code into a single file and then i can throw it into a chat window and give it all the code
# Output file
OUTPUT_FILE="jormundb-odin-project_context.txt"

View File

@@ -10,5 +10,4 @@ services:
JORMUN_HOST: 0.0.0.0
JORMUN_PORT: 8002
JORMUN_DATA_DIR: /data
JORMUN_VERBOSE: 0
JORMUN_ACCESS_KEY: AKIAIOSFODNN7EXAMPLE

View File

@@ -31,7 +31,7 @@ Batch_Write_Table_Request :: struct {
}
Batch_Write_Result :: struct {
// UnprocessedItems requests that failed and should be retried.
// UnprocessedItems requests that failed and should be retried.
// For now we process everything or return an error, so this is
// typically empty. Populated only on partial failures.
unprocessed: [dynamic]Batch_Write_Table_Request,
@@ -48,7 +48,7 @@ batch_write_result_destroy :: proc(result: ^Batch_Write_Result) {
}
// ============================================================================
// BatchWriteItem Execute a batch of put/delete operations
// BatchWriteItem Execute a batch of put/delete operations
//
// DynamoDB semantics:
// - Operations within a batch are NOT atomic (some may succeed, some fail)
@@ -93,7 +93,7 @@ batch_write_item :: proc(
if var_err != .None {
#partial switch var_err {
case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error:
// Hard validation errors fail the entire batch
// Hard validation errors fail the entire batch
batch_write_result_destroy(&result)
delete(failed_requests)
return result, var_err
@@ -106,7 +106,7 @@ batch_write_item :: proc(
return result, .Table_Not_Found
case .RocksDB_Error, .Item_Not_Found:
// Genuinely transient/infrastructure errors add to UnprocessedItems.
// Genuinely transient/infrastructure errors add to UnprocessedItems.
failed_item := item_deep_copy(req.item)
append(&failed_requests, Write_Request{
type = req.type,
@@ -176,7 +176,7 @@ batch_get_result_destroy :: proc(result: ^Batch_Get_Result) {
}
// ============================================================================
// BatchGetItem Retrieve multiple items from one or more tables
// BatchGetItem Retrieve multiple items from one or more tables
//
// DynamoDB semantics:
// - Each key is fetched independently
@@ -216,14 +216,14 @@ batch_get_item :: proc(
if get_err != .None && get_err != .Item_Not_Found {
#partial switch get_err {
case .Missing_Key_Attribute, .Invalid_Key, .Serialization_Error:
// Hard validation error fail the entire batch
// Hard validation error fail the entire batch
batch_get_result_destroy(&result)
delete(found_items)
delete(failed_keys)
return result, get_err
case .RocksDB_Error, .Table_Not_Found:
// Transient error add to unprocessed
// Transient error add to unprocessed
append(&failed_keys, item_deep_copy(key))
continue

View File

@@ -10,9 +10,9 @@
// - All comparisons → false (no attribute to compare)
//
// This file provides:
// 1. parse_condition_expression_string extract ConditionExpression from JSON body
// 2. evaluate_condition evaluate parsed condition against an item
// 3. Condition_Result result enum for condition evaluation
// 1. parse_condition_expression_string extract ConditionExpression from JSON body
// 2. evaluate_condition evaluate parsed condition against an item
// 3. Condition_Result result enum for condition evaluation
package dynamodb
import "core:encoding/json"
@@ -67,15 +67,15 @@ parse_condition_expression_string :: proc(request_body: []byte) -> (expr: string
// request body, then evaluates against the existing item.
//
// Parameters:
// request_body full JSON request body
// existing_item the item currently in the database (nil if no item exists)
// attr_names pre-parsed ExpressionAttributeNames (caller may already have these)
// attr_values pre-parsed ExpressionAttributeValues
// request_body full JSON request body
// existing_item the item currently in the database (nil if no item exists)
// attr_names pre-parsed ExpressionAttributeNames (caller may already have these)
// attr_values pre-parsed ExpressionAttributeValues
//
// Returns Condition_Result:
// .Passed no ConditionExpression, or condition evaluated to true
// .Failed condition evaluated to false
// .Parse_Error ConditionExpression is malformed
// .Passed no ConditionExpression, or condition evaluated to true
// .Failed condition evaluated to false
// .Parse_Error ConditionExpression is malformed
// ============================================================================
evaluate_condition_expression :: proc(
@@ -107,7 +107,7 @@ evaluate_condition_expression :: proc(
if item, has_item := existing_item.?; has_item {
eval_item = item
} else {
// Empty item no attributes exist
// Empty item no attributes exist
eval_item = Item{}
}

View File

@@ -64,10 +64,10 @@ parse_projection_expression :: proc(
return result[:], true
}
// Apply projection to a single item returns a new item with only the specified attributes
// Apply projection to a single item returns a new item with only the specified attributes
apply_projection :: proc(item: Item, projection: []string) -> Item {
if len(projection) == 0 {
// No projection return a deep copy of the full item
// No projection return a deep copy of the full item
return item_deep_copy(item)
}

View File

@@ -1,4 +1,4 @@
// gsi_metadata.odin GSI metadata parsing for serialize/deserialize_table_metadata
// gsi_metadata.odin GSI metadata parsing for serialize/deserialize_table_metadata
//
// Parses GSI definitions from the embedded JSON string stored in table metadata.
// This file lives in the dynamodb/ package.

View File

@@ -540,13 +540,13 @@ parse_exclusive_start_key :: proc(
) -> (result: Maybe([]byte), ok: bool, body_err: bool) {
data, parse_err := json.parse(request_body, allocator = context.temp_allocator)
if parse_err != nil {
return nil, false, true // body is not valid JSON real error
return nil, false, true // body is not valid JSON real error
}
defer json.destroy_value(data)
root, root_ok := data.(json.Object)
if !root_ok {
return nil, false, true // root must be an object real error
return nil, false, true // root must be an object real error
}
esk_val, found := root["ExclusiveStartKey"]
@@ -582,7 +582,7 @@ parse_exclusive_start_key :: proc(
}
// parse_exclusive_start_key_gsi ... Just a helper for GSI keys
// Returns (key, ok, body_parse_err) same contract as parse_exclusive_start_key.
// Returns (key, ok, body_parse_err) same contract as parse_exclusive_start_key.
parse_exclusive_start_key_gsi :: proc(
request_body: []byte,
table_name: string,

View File

@@ -1,4 +1,4 @@
// key_codec_gsi.odin Additional key codec functions for GSI support
// key_codec_gsi.odin Additional key codec functions for GSI support
//
// These procedures complement key_codec.odin with prefix builders needed
// for GSI scanning and querying. They follow the same encoding conventions:

View File

@@ -643,7 +643,7 @@ create_table :: proc(
return desc, .None
}
// Delete table removes metadata AND all items with the table's data prefix
// Delete table removes metadata AND all items with the table's data prefix
delete_table :: proc(engine: ^Storage_Engine, table_name: string) -> Storage_Error {
table_lock := get_or_create_table_lock(engine, table_name)
sync.rw_mutex_lock(table_lock)
@@ -746,7 +746,7 @@ delete_table :: proc(engine: ^Storage_Engine, table_name: string) -> Storage_Err
// Item Operations
// ============================================================================
// Put item uses EXCLUSIVE lock (write operation)
// Put item uses EXCLUSIVE lock (write operation)
// ATOMICITY: Uses WriteBatch to ensure base item + all GSI updates are atomic
put_item :: proc(engine: ^Storage_Engine, table_name: string, item: Item) -> Storage_Error {
table_lock := get_or_create_table_lock(engine, table_name)
@@ -794,15 +794,15 @@ put_item :: proc(engine: ^Storage_Engine, table_name: string, item: Item) -> Sto
old_item: Maybe(Item) = nil
existing_value, existing_err := rocksdb.db_get(&engine.db, storage_key)
if existing_err == .NotFound {
// Item does not exist nothing to clean up, proceed normally.
// Item does not exist nothing to clean up, proceed normally.
} else if existing_err != .None {
// Unexpected RocksDB I/O error fail closed to avoid orphaned GSI entries.
// Unexpected RocksDB I/O error fail closed to avoid orphaned GSI entries.
return .RocksDB_Error
} else if existing_value != nil {
defer delete(existing_value)
decoded_old, decode_ok := decode(existing_value)
if !decode_ok {
// Value exists but is unreadable fail closed rather than leaving
// Value exists but is unreadable fail closed rather than leaving
// stale GSI entries behind after the overwrite.
return .Serialization_Error
}
@@ -856,7 +856,7 @@ put_item :: proc(engine: ^Storage_Engine, table_name: string, item: Item) -> Sto
return .None
}
// Get item uses SHARED lock (read operation)
// Get item uses SHARED lock (read operation)
get_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> (Maybe(Item), Storage_Error) {
table_lock := get_or_create_table_lock(engine, table_name)
sync.rw_mutex_shared_lock(table_lock)
@@ -912,7 +912,7 @@ get_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> (May
return item, .None
}
// Delete item uses EXCLUSIVE lock (write operation)
// Delete item uses EXCLUSIVE lock (write operation)
// ATOMICITY: Uses WriteBatch to ensure base item + all GSI deletions are atomic
delete_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> Storage_Error {
table_lock := get_or_create_table_lock(engine, table_name)
@@ -954,16 +954,16 @@ delete_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> S
old_item: Maybe(Item) = nil
existing_value, existing_err := rocksdb.db_get(&engine.db, storage_key)
if existing_err == .NotFound {
// Item does not exist nothing to delete (DynamoDB idempotent delete).
// Item does not exist nothing to delete (DynamoDB idempotent delete).
return .None
} else if existing_err != .None {
// Unexpected RocksDB I/O error fail closed.
// Unexpected RocksDB I/O error fail closed.
return .RocksDB_Error
} else if existing_value != nil {
defer delete(existing_value)
decoded_old, decode_ok := decode(existing_value)
if !decode_ok {
// Value exists but is corrupt fail closed rather than deleting the
// Value exists but is corrupt fail closed rather than deleting the
// base item while leaving its GSI entries dangling.
return .Serialization_Error
}
@@ -1010,7 +1010,7 @@ delete_item :: proc(engine: ^Storage_Engine, table_name: string, key: Item) -> S
}
// ============================================================================
// Scan with FIXED pagination
// Scan with FIXED pagination
//
// FIX: LastEvaluatedKey must be the key of the LAST RETURNED item, not the
// next unread item. DynamoDB semantics: ExclusiveStartKey resumes
@@ -1047,7 +1047,7 @@ scan :: proc(
// Seek to start position
if start_key, has_start := exclusive_start_key.?; has_start {
// Resume from pagination token seek to the key then skip it (exclusive)
// Resume from pagination token seek to the key then skip it (exclusive)
rocksdb.rocksdb_iter_seek(iter, raw_data(start_key), c.size_t(len(start_key)))
if rocksdb.rocksdb_iter_valid(iter) != 0 {
rocksdb.rocksdb_iter_next(iter)
@@ -1076,7 +1076,7 @@ scan :: proc(
break
}
// Check limit if we already have enough items, note there's more and stop
// Check limit if we already have enough items, note there's more and stop
if count >= max_items {
has_more = true
break
@@ -1127,7 +1127,7 @@ scan :: proc(
}
// ============================================================================
// Query with sort key condition filtering and FIXED pagination
// Query with sort key condition filtering and FIXED pagination
// ============================================================================
query :: proc(
@@ -1186,7 +1186,7 @@ query :: proc(
break
}
// Hit limit note there's more and stop
// Hit limit note there's more and stop
if count >= max_items {
has_more = true
break
@@ -1207,7 +1207,7 @@ query :: proc(
// ---- Sort key condition filtering ----
if skc, has_skc := sk_condition.?; has_skc {
if !evaluate_sort_key_condition(item, &skc) {
// Item doesn't match SK condition skip it
// Item doesn't match SK condition skip it
item_copy := item
item_destroy(&item_copy)
rocksdb.iter_next(&iter)
@@ -1381,7 +1381,7 @@ validate_item_key_types :: proc(
et, has_et := expected_type.?
if !has_et {
continue // No definition found skip validation (shouldn't happen)
continue // No definition found skip validation (shouldn't happen)
}
// Check actual type matches expected

View File

@@ -93,7 +93,7 @@ transact_write_result_destroy :: proc(result: ^Transact_Write_Result) {
}
// ============================================================================
// TransactWriteItems Execute an atomic batch of write operations
// TransactWriteItems Execute an atomic batch of write operations
//
// DynamoDB semantics:
// 1. Acquire exclusive locks on all involved tables
@@ -336,9 +336,9 @@ transact_write_items :: proc(
existing, read_err := get_item_internal(engine, action.table_name, key_item, metadata)
#partial switch read_err {
case .None:
// Item found or not found both fine.
// Item found or not found both fine.
case .RocksDB_Error, .Serialization_Error, .Internal_Error:
// Cannot safely determine old index keys cancel the entire transaction.
// Cannot safely determine old index keys cancel the entire transaction.
reasons[idx] = Cancellation_Reason{
code = "InternalError",
message = "Failed to read existing item for index maintenance",
@@ -356,7 +356,7 @@ transact_write_items :: proc(
return result, .Internal_Error
case .Table_Not_Found, .Item_Not_Found, .Validation_Error:
// These should not be returned by get_item_internal, but handle
// defensively treat as "item does not exist" and continue.
// defensively treat as "item does not exist" and continue.
}
old_items[idx] = existing
}
@@ -667,7 +667,7 @@ transact_get_result_destroy :: proc(result: ^Transact_Get_Result) {
}
// ============================================================================
// TransactGetItems Atomically read up to 100 items
// TransactGetItems Atomically read up to 100 items
//
// DynamoDB semantics:
// - All reads are performed with a consistent snapshot

View File

@@ -6,7 +6,7 @@ import "core:strings"
// DynamoDB AttributeValue - the core data type
Attribute_Value :: union {
String, // S
DDB_Number, // N decimal-preserving numeric type
DDB_Number, // N decimal-preserving numeric type
Binary, // B (base64)
Bool, // BOOL
Null, // NULL
@@ -384,7 +384,7 @@ build_attribute_value_with_type :: proc(raw_bytes: []byte, attr_type: Scalar_Att
if fb_ok {
return fallback_num
}
// Last resort return as string (shouldn't happen)
// Last resort return as string (shouldn't happen)
return String(strings.clone(string(raw_bytes)))
case .B:
return Binary(strings.clone(string(raw_bytes)))

View File

@@ -16,7 +16,7 @@ import "core:encoding/json"
import "core:strings"
// ============================================================================
// Update Plan parsed representation of an UpdateExpression
// Update Plan parsed representation of an UpdateExpression
// ============================================================================
Update_Action_Type :: enum {
@@ -207,7 +207,7 @@ parse_set_clause :: proc(
if comma == "," {
continue
}
// Not a comma put it back
// Not a comma put it back
t.pos = saved_pos
}
break
@@ -280,7 +280,7 @@ parse_set_value_expr :: proc(
}, true
}
// First token is a path check for path + :val or path - :val
// First token is a path check for path + :val or path - :val
source, source_resolved := resolve_attribute_name(first_tok, names)
if !source_resolved {
return {}, false
@@ -304,7 +304,7 @@ parse_set_value_expr :: proc(
source = source,
}, true
}
// Just a path reference treat as direct copy (SET a = b)
// Just a path reference treat as direct copy (SET a = b)
t.pos = peek_pos
return {}, false
}
@@ -368,7 +368,7 @@ parse_list_append :: proc(
return {}, false
}
// First operand could be :val or path
// First operand could be :val or path
first_tok, first_ok := next_token(t)
if !first_ok {
return {}, false
@@ -591,7 +591,7 @@ is_clause_keyword :: proc(tok: string) -> bool {
}
// ============================================================================
// Execute Update Plan apply mutations to an Item (in-place)
// Execute Update Plan apply mutations to an Item (in-place)
// ============================================================================
// Reasons an update plan can fail at execution time.
@@ -761,7 +761,7 @@ execute_update_plan :: proc(item: ^Item, plan: ^Update_Plan) -> Update_Exec_Erro
return .Add_Type_Mismatch
}
} else {
// Attribute doesn't exist create it
// Attribute doesn't exist create it
item[strings.clone(action.path)] = attr_value_deep_copy(action.value)
}
}

View File

@@ -4,7 +4,7 @@ import "core:strings"
import "core:sync"
import "../rocksdb"
// UpdateItem fetch existing item, apply update plan, write back
// UpdateItem fetch existing item, apply update plan, write back
// Uses EXCLUSIVE lock (write operation)
// ATOMICITY: Uses WriteBatch to ensure base item + all GSI updates are atomic
//
@@ -61,7 +61,7 @@ update_item :: proc(
// Save old item for ReturnValues (and for GSI cleanup)
old_item = item_deep_copy(existing_item)
} else if get_err == .NotFound || existing_encoded == nil {
// Item doesn't exist yet start with just the key attributes
// Item doesn't exist yet start with just the key attributes
existing_item = make(Item)
for ks in metadata.key_schema {

View File

@@ -1,9 +1,9 @@
// gsi_handlers.odin GSI-related HTTP handler helpers
// gsi_handlers.odin GSI-related HTTP handler helpers
//
// This file lives in the main package alongside main.odin.
// It provides:
// 1. parse_global_secondary_indexes parse GSI definitions from CreateTable request
// 2. parse_index_name extract IndexName from Query/Scan requests
// 1. parse_global_secondary_indexes parse GSI definitions from CreateTable request
// 2. parse_index_name extract IndexName from Query/Scan requests
// 3. Projection type helper for response building
package main
@@ -31,7 +31,7 @@ import "dynamodb"
// ]
// }
//
// Returns nil if no GSI definitions are present (valid GSIs are optional).
// Returns nil if no GSI definitions are present (valid GSIs are optional).
// ============================================================================
parse_global_secondary_indexes :: proc(
@@ -186,7 +186,7 @@ parse_single_gsi :: proc(
gsi.key_schema = key_schema
// Projection (optional defaults to ALL)
// Projection (optional defaults to ALL)
gsi.projection.projection_type = .ALL
if proj_val, proj_found := obj["Projection"]; proj_found {
if proj_obj, proj_ok := proj_val.(json.Object); proj_ok {

View File

@@ -12,8 +12,9 @@ Config :: struct {
host: string,
port: int,
data_dir: string,
verbose: bool,
//verbose: bool, // Removed: verbose logging grew unwieldy and was dropped from the config
access_key: string,
//secret_key: string, // Omitted: AWS clients derive an HMAC signing key from the secret key (SigV4), so supporting it would require verifying the request signature on every call — overhead this project deliberately avoids
// HTTP server config
max_body_size: int,
@@ -96,7 +97,7 @@ parse_access_key_from_auth :: proc(auth_header: string) -> (access_key: string,
return "", false
}
// Find Credential= (case-sensitive per AWS spec)
// Find Credential= this is actually case-sensitive
cred_idx := strings.index(auth_header, "Credential=")
if cred_idx == -1 {
return "", false
@@ -381,7 +382,7 @@ handle_describe_table :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_R
strings.write_string(&builder, `]`)
// Include GSI Info INSIDE the Table object, before the closing braces
// Include GSI Info INSIDE the Table object, before the closing braces
if gsis, has_gsis := metadata.global_secondary_indexes.?; has_gsis && len(gsis) > 0 {
strings.write_string(&builder, `,"GlobalSecondaryIndexes":[`)
for gsi, gi in gsis {
@@ -490,11 +491,11 @@ handle_put_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Request
if !key_ok {
// If no explicit Key field, extract key from Item
// (PutItem doesn't have a Key field the key is in the Item itself)
// (PutItem doesn't have a Key field the key is in the Item itself)
existing_maybe, get_err := dynamodb.get_item(engine, table_name, item)
#partial switch get_err {
case .None:
// Item found or not found both are fine, condition evaluates against
// Item found or not found both are fine, condition evaluates against
// whatever was returned (nil item = item doesn't exist).
case .Table_Not_Found:
// Table will be caught and reported properly by put_item below.
@@ -648,7 +649,7 @@ handle_delete_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Requ
existing_item, get_err := dynamodb.get_item(engine, table_name, key)
#partial switch get_err {
case .None:
// Item found or not found condition evaluates against whatever was returned.
// Item found or not found condition evaluates against whatever was returned.
case .Table_Not_Found:
// Table will be caught and reported properly by delete_item below.
case .Missing_Key_Attribute, .Invalid_Key:
@@ -658,7 +659,7 @@ handle_delete_item :: proc(engine: ^dynamodb.Storage_Engine, request: ^HTTP_Requ
make_error_response(response, .InternalServerError, "Failed to fetch existing item")
return
case .Validation_Error, .Item_Not_Found:
// Defensive shouldn't reach here normally.
// Defensive shouldn't reach here normally.
}
defer {
if ex, has_ex := existing_item.?; has_ex {
@@ -1810,8 +1811,8 @@ write_items_response_with_pagination :: proc(
//
// Maps storage errors to the correct DynamoDB error type AND HTTP status code.
// DynamoDB uses:
// 400 ValidationException, ResourceNotFoundException, ResourceInUseException, etc.
// 500 InternalServerError
// 400 ValidationException, ResourceNotFoundException, ResourceInUseException, etc.
// 500 InternalServerError
// ============================================================================
handle_storage_error :: proc(response: ^HTTP_Response, err: dynamodb.Storage_Error) {
@@ -2105,9 +2106,9 @@ validate_key_attributes_defined :: proc(key_schema: []dynamodb.Key_Schema_Elemen
// Error Response Helper
//
// Maps DynamoDB error types to correct HTTP status codes:
// 400 ValidationException, ResourceNotFoundException, ResourceInUseException,
// 400 ValidationException, ResourceNotFoundException, ResourceInUseException,
// ConditionalCheckFailedException, SerializationException
// 500 InternalServerError
// 500 InternalServerError
// ============================================================================
make_error_response :: proc(response: ^HTTP_Response, err_type: dynamodb.DynamoDB_Error_Type, message: string) -> HTTP_Response {
@@ -2140,7 +2141,7 @@ parse_config :: proc() -> Config {
host = "0.0.0.0",
port = 8002,
data_dir = "./data",
verbose = false,
//verbose = false,
max_body_size = 100 * 1024 * 1024, // 100 MB
max_headers = 100,
read_buffer_size = 8 * 1024, // 8 KB
@@ -2164,10 +2165,6 @@ parse_config :: proc() -> Config {
config.data_dir = data_dir
}
if verbose, ok := os.lookup_env("JORMUN_VERBOSE"); ok {
config.verbose = verbose == "1"
}
if max_body_str, ok := os.lookup_env("JORMUN_MAX_BODY_SIZE"); ok {
if max_body, parse_ok := strconv.parse_int(max_body_str); parse_ok {
config.max_body_size = max_body
@@ -2208,8 +2205,6 @@ parse_config :: proc() -> Config {
if value, ok := get_value(args, &i); ok {
config.data_dir = value
}
case "--verbose", "-v":
config.verbose = true
case "--max-body-size":
if value, ok := get_value(args, &i); ok {
if size, parse_ok := strconv.parse_int(value); parse_ok {
@@ -2244,7 +2239,6 @@ OPTIONS:
--host, -h <HOST> Server bind address (default: 0.0.0.0)
--port, -p <PORT> Server port (default: 8002)
--data-dir, -d <DIR> Data directory path (default: ./data)
--verbose, -v Enable verbose logging
--max-body-size <BYTES> Maximum request body size in bytes (default: 104857600 = 100MB)
--max-headers <COUNT> Maximum number of headers per request (default: 100)
--no-keep-alive Disable HTTP keep-alive connections
@@ -2254,7 +2248,6 @@ ENVIRONMENT VARIABLES:
JORMUN_HOST Same as --host
JORMUN_PORT Same as --port
JORMUN_DATA_DIR Same as --data-dir
JORMUN_VERBOSE Set to "1" to enable verbose mode
JORMUN_MAX_BODY_SIZE Same as --max-body-size
EXAMPLES:

View File

@@ -1,4 +1,4 @@
// transact_handlers.odin HTTP handlers for TransactWriteItems and TransactGetItems
// transact_handlers.odin HTTP handlers for TransactWriteItems and TransactGetItems
//
// Also contains the UPDATED_NEW / UPDATED_OLD filtering helper for UpdateItem.
package main