Files
context-paging/conversation-runner.php
biondizzle f4aafb1095 Remove hardcoded Redis URL from conversation-runner.php
- Let index.php handle REDIS_URL from .env
- No need to pass it through subprocess environment
2026-03-28 09:08:37 +00:00

256 lines
7.4 KiB
PHP

<?php
/**
* Conversation Runner — Thin wrapper around index.php
*
* Manages conversation state and prompts, pipes everything through
* the context-paging pipeline in index.php.
*
* Usage: php conversation-runner.php
*
* Environment variables (passed to index.php):
* API_BASE_URL — Override endpoint (default: from conversation file)
* MAX_CONTEXT_TOKENS — Override context limit (default: from conversation file)
*
* Note: REDIS_URL, VLLM_URL, etc. are read from .env by index.php
*/
require __DIR__ . '/vendor/autoload.php';
use ContextPaging\TokenCounter;
// -----------------------------------------------------
// Configuration
// -----------------------------------------------------
$conversationFile = __DIR__ . '/conversations/coding-session.json';

// -----------------------------------------------------
// Load conversation state
// -----------------------------------------------------
// Read and decode the persisted conversation. Every failure path exits
// non-zero so shell callers can detect it.
if (!file_exists($conversationFile)) {
    echo "Error: Conversation file not found: {$conversationFile}\n";
    exit(1);
}
$conversationJson = file_get_contents($conversationFile);
if ($conversationJson === false) {
    // file_exists() passed but the read still failed (permissions, race, ...).
    // Previously a false here was fed straight into json_decode().
    echo "Error: Unable to read conversation file: {$conversationFile}\n";
    exit(1);
}
$conversation = json_decode($conversationJson, true);
// A top-level scalar (e.g. "hello") is valid JSON but not a usable
// conversation — require an array so later $conversation['...'] reads hold.
if (json_last_error() !== JSON_ERROR_NONE || !is_array($conversation)) {
    echo "Error: Invalid JSON in conversation file\n";
    exit(1);
}
// -----------------------------------------------------
// Model-specific settings
// -----------------------------------------------------
// Per-model quirks, keyed by model identifier. Models absent from this
// table fall back to an empty settings array (no system prompt injected,
// no workarounds applied).
$modelConfig = [
    'HuggingFaceTB/SmolLM3-3B' => [
        'system_prompt' => '/no_think',
        'reasoning_parser_workaround' => true,
    ],
];
$activeModel = $conversation['model'];
$modelSettings = $modelConfig[$activeModel] ?? [];

// -----------------------------------------------------
// Prompts for each turn (excerpted for brevity - full list below)
// -----------------------------------------------------
// prompts.php returns a turn-number => prompt-text map.
$prompts = require __DIR__ . '/prompts.php';
// -----------------------------------------------------
// Determine next turn
// -----------------------------------------------------
// Turns are 1-indexed: with N completed turns the next one is N + 1.
$nextTurn = 1 + count($conversation['turns']);
$nextPrompt = $prompts[$nextTurn] ?? null;

// No prompt scripted for this turn number — the conversation is done.
if ($nextPrompt === null) {
    echo "No more prompts defined. Current turn: {$nextTurn}\n";
    echo "Conversation complete.\n";
    exit(0);
}
// -----------------------------------------------------
// Build messages array
// -----------------------------------------------------
$messages = [];

// A model-specific system prompt, when configured, goes first.
if (!empty($modelSettings['system_prompt'])) {
    $messages[] = [
        'role' => 'system',
        'content' => $modelSettings['system_prompt'],
    ];
}

// Replay the stored history: one user message plus one assistant message
// per completed turn. Assistant entries may carry content, tool_calls,
// or both, so each field is copied only when present.
foreach ($conversation['turns'] as $pastTurn) {
    $messages[] = ['role' => 'user', 'content' => $pastTurn['user']];

    $assistant = ['role' => 'assistant'];
    foreach (['content' => 'assistant', 'tool_calls' => 'tool_calls'] as $msgKey => $turnKey) {
        if (isset($pastTurn[$turnKey])) {
            $assistant[$msgKey] = $pastTurn[$turnKey];
        }
    }
    $messages[] = $assistant;
}

// Finally, the prompt for the turn we are about to run.
$messages[] = ['role' => 'user', 'content' => $nextPrompt];
// -----------------------------------------------------
// Build payload for index.php
// -----------------------------------------------------
$payload = [
    'messages' => $messages,
    'model' => $conversation['model'],
    'max_tokens' => 8000,
    'temperature' => 0.7,
];

// -----------------------------------------------------
// Show turn info
// -----------------------------------------------------
echo "=== TURN {$nextTurn} ===\n";
printf("User: %s...\n\n", substr($nextPrompt, 0, 100));

// Pre-flight token count so the operator can eyeball headroom before the call.
$tokenCounter = new TokenCounter();
$originalTokens = $tokenCounter->contextSize($messages);
printf("Input tokens: %s\n", $originalTokens);
printf("Context limit: %s\n\n", $conversation['max_context']);
// -----------------------------------------------------
// Call index.php with the payload
// -----------------------------------------------------
$payloadJson = json_encode($payload);
if ($payloadJson === false) {
    // json_encode can fail (e.g. invalid UTF-8 in history); previously a
    // bare false would have been handed to escapeshellarg().
    echo "Error: Failed to encode payload as JSON\n";
    exit(1);
}
$indexPhp = __DIR__ . '/index.php';

// Environment overrides consumed by index.php (everything else, e.g.
// REDIS_URL, is read from .env by index.php itself).
$env = [
    'API_BASE_URL' => $conversation['endpoint'],
    'MAX_CONTEXT_TOKENS' => (string) $conversation['max_context'],
];

// Build a `KEY='value' ... php index.php 'payload'` command line. Every
// interpolated value is shell-escaped — including the script path, which
// previously broke if __DIR__ contained spaces or shell metacharacters.
$envString = '';
foreach ($env as $key => $value) {
    $envString .= "{$key}=" . escapeshellarg($value) . ' ';
}
$command = $envString . 'php ' . escapeshellarg($indexPhp) . ' '
    . escapeshellarg($payloadJson) . ' 2>&1';

// Execute; stderr is merged into stdout so backend errors surface in $output.
// shell_exec returns null on error/no output and false if the pipe cannot
// be established — treat both as failure.
$output = shell_exec($command);
if ($output === null || $output === false) {
    echo "Error: Failed to execute index.php\n";
    exit(1);
}
// -----------------------------------------------------
// Parse response
// -----------------------------------------------------
$response = json_decode($output, true);
if (json_last_error() !== JSON_ERROR_NONE) {
    echo "Error: Invalid JSON response from index.php\n";
    echo "Raw output:\n{$output}\n";
    exit(1);
}
if (isset($response['error'])) {
    echo "ERROR: " . json_encode($response['error'], JSON_PRETTY_PRINT) . "\n";
    exit(1);
}

// Extract response data (OpenAI-style chat completion shape).
$message = $response['choices'][0]['message'] ?? [];
$assistantContent = $message['content'] ?? null;
$toolCalls = $message['tool_calls'] ?? null;
$reasoning = $message['reasoning'] ?? null;
$usage = $response['usage'] ?? [];

// Reasoning workaround: some reasoning parsers leave `content` empty and
// put the full reply in `reasoning`; promote it so the turn still records
// an assistant message.
if ($assistantContent === null && $reasoning !== null && !empty($modelSettings['reasoning_parser_workaround'])) {
    echo "Note: Using reasoning as content (parser workaround)\n";
    $assistantContent = $reasoning;
    // Null out rather than unset(): later `if ($reasoning)` checks would
    // otherwise read an undefined variable (PHP 8 warning), and the promoted
    // text must not be duplicated into the saved turn's `reasoning` field.
    $reasoning = null;
}
// -----------------------------------------------------
// Display results
// -----------------------------------------------------
if ($toolCalls) {
    echo "Tool calls: " . json_encode($toolCalls, JSON_PRETTY_PRINT) . "\n";
}
if ($reasoning) {
    echo "Reasoning: " . substr($reasoning, 0, 200) . "...\n";
}
echo "Assistant: " . substr($assistantContent ?? '(null)', 0, 300) . "...\n\n";

// -----------------------------------------------------
// Usage stats
// -----------------------------------------------------
if ($usage) {
    // Default missing counters to 0 so a partial usage object from the
    // backend neither emits undefined-index warnings nor breaks the
    // "Remaining" arithmetic below.
    $promptTokens = $usage['prompt_tokens'] ?? 0;
    $completionTokens = $usage['completion_tokens'] ?? 0;
    $totalTokens = $usage['total_tokens'] ?? 0;

    echo "=== USAGE ===\n";
    echo "Prompt tokens: {$promptTokens}\n";
    echo "Completion tokens: {$completionTokens}\n";
    echo "Total tokens: {$totalTokens}\n";
    echo "Context limit: {$conversation['max_context']}\n";
    echo "Remaining: " . ($conversation['max_context'] - $promptTokens) . "\n\n";
}
// -----------------------------------------------------
// Save turn
// -----------------------------------------------------
$turnData = [
    'turn' => $nextTurn,
    'user' => $nextPrompt,
    'usage' => $usage,
];
// Optional fields are stored only when the response produced them, keeping
// the saved JSON shape consistent with what the history replay expects.
if ($assistantContent !== null) {
    $turnData['assistant'] = $assistantContent;
}
if ($toolCalls) {
    $turnData['tool_calls'] = $toolCalls;
}
if ($reasoning) {
    $turnData['reasoning'] = $reasoning;
}
$conversation['turns'][] = $turnData;

$written = file_put_contents(
    $conversationFile,
    json_encode($conversation, JSON_PRETTY_PRINT | JSON_UNESCAPED_UNICODE)
);
if ($written === false) {
    // Previously a failed write was silent, and the turn was simply lost
    // on the next run — make it loud and fail the process.
    echo "Error: Failed to write conversation file: {$conversationFile}\n";
    exit(1);
}
echo "=== SAVED ===\n";
echo "Turn {$nextTurn} saved.\n";
// -----------------------------------------------------
// Context usage warning
// -----------------------------------------------------
// Warn the operator as the prompt approaches the model's context window.
if ($usage) {
    $usedPct = 100 * $usage['prompt_tokens'] / $conversation['max_context'];
    $pctLabel = round($usedPct, 1);

    echo "Context usage: {$pctLabel}%\n";
    if ($usedPct > 80) {
        echo "\n*** APPROACHING CONTEXT LIMIT ({$pctLabel}%) ***\n";
    }
    if ($usedPct > 95) {
        echo "*** STOP - Context nearly exhausted ***\n";
    }
}