Update gpt/index.html

This commit is contained in:
2026-05-10 17:28:14 +02:00
parent 86572e0842
commit 2fe636fc90
+65 -21
View File
@@ -336,7 +336,7 @@
</div> </div>
<div class="footer"> <div class="footer">
<span>COMMANDS: help | config | clear | status</span> <span>COMMANDS: help | config | clear</span>
<span id="timeDisplay"></span> <span id="timeDisplay"></span>
</div> </div>
</div> </div>
@@ -353,6 +353,7 @@
<option value="lmstudio">LM Studio (localhost:1234)</option> <option value="lmstudio">LM Studio (localhost:1234)</option>
<option value="gpt4all">GPT4All (localhost:4891)</option> <option value="gpt4all">GPT4All (localhost:4891)</option>
<option value="openai">OpenAI API</option> <option value="openai">OpenAI API</option>
<option value="claude">Claude API (Anthropic)</option>
</select> </select>
</div> </div>
@@ -368,7 +369,7 @@
<div class="config-group"> <div class="config-group">
<label>API KEY (optional)</label> <label>API KEY (optional)</label>
<input type="password" id="apiKeyInput" placeholder="Leave empty for local models"> <input type="text" id="apiKeyInput" placeholder="Leave empty for local models" autocomplete="off" data-form-type="other">
</div> </div>
<div class="config-buttons"> <div class="config-buttons">
@@ -501,7 +502,8 @@
ollama: { endpoint: 'http://localhost:11434/api/chat', model: 'llama2', type: 'ollama' }, ollama: { endpoint: 'http://localhost:11434/api/chat', model: 'llama2', type: 'ollama' },
lmstudio: { endpoint: 'http://localhost:1234/v1/chat/completions', model: 'local-model', type: 'openai' }, lmstudio: { endpoint: 'http://localhost:1234/v1/chat/completions', model: 'local-model', type: 'openai' },
gpt4all: { endpoint: 'http://localhost:4891/v1/chat/completions', model: 'gpt4all-model', type: 'openai' }, gpt4all: { endpoint: 'http://localhost:4891/v1/chat/completions', model: 'gpt4all-model', type: 'openai' },
openai: { endpoint: 'https://api.openai.com/v1/chat/completions', model: 'gpt-3.5-turbo', type: 'openai' } openai: { endpoint: 'https://api.openai.com/v1/chat/completions', model: 'gpt-4o-mini', type: 'openai' },
claude: { endpoint: 'https://api.anthropic.com/v1/messages', model: 'claude-sonnet-4-20250514', type: 'claude' }
}; };
const preset = presets[this.value]; const preset = presets[this.value];
@@ -512,6 +514,32 @@
} }
}); });
/* ============================================
HELPER FUNCTIONS
============================================ */
function getFormattedDate() {
    // Returns the current local date/time as "dd/mm/yyyy:HH/MM/SS".
    // NOTE(review): the slash-separated time and ":" date/time divider is an
    // unusual format, but it is preserved exactly as the existing contract
    // for the "date" keyword substitution.
    const pad2 = (value) => String(value).padStart(2, '0');
    const now = new Date();
    const datePart = [pad2(now.getDate()), pad2(now.getMonth() + 1), now.getFullYear()].join('/');
    const timePart = [pad2(now.getHours()), pad2(now.getMinutes()), pad2(now.getSeconds())].join('/');
    return `${datePart}:${timePart}`;
}
function getStatusInfo() {
    // Builds a one-line summary of the AI backend configuration; used when
    // the "status" keyword is expanded inside an outgoing message.
    // Reads the file-level `aiConfig` object (enabled, endpoint, model).
    const connection = aiConfig.enabled ? 'ONLINE' : 'OFFLINE';
    const endpoint = aiConfig.endpoint || 'none';
    const model = aiConfig.model || 'none';
    return `[Connection: ${connection}, Endpoint: ${endpoint}, Model: ${model}]`;
}
function preprocessMessage(message) {
    // Expands recognized keywords before the message is sent to the AI:
    //   "date"   -> "date (<current timestamp>)"
    //   "status" -> "status [<connection summary>]"
    // Both matches are whole-word and case-insensitive; the replacement text
    // itself is lowercase regardless of how the user typed the keyword.
    const withDate = message.replace(/\bdate\b/gi, `date (${getFormattedDate()})`);
    const withStatus = withDate.replace(/\bstatus\b/gi, `status ${getStatusInfo()}`);
    return withStatus;
}
/* ============================================ /* ============================================
AI COMMUNICATION AI COMMUNICATION
============================================ */ ============================================ */
@@ -523,12 +551,15 @@
addLine('To connect an AI backend:', 'system'); addLine('To connect an AI backend:', 'system');
addLine(' 1. Type "config" to open settings', 'system'); addLine(' 1. Type "config" to open settings', 'system');
addLine(' 2. Choose a preset or enter custom endpoint', 'system'); addLine(' 2. Choose a preset or enter custom endpoint', 'system');
addLine(' 3. Supported: Ollama, LM Studio, GPT4All, OpenAI', 'system'); addLine(' 3. Supported: Ollama, LM Studio, GPT4All, OpenAI, Claude', 'system');
return; return;
} }
isProcessing = true; isProcessing = true;
// Preprocess message to replace date/status keywords
const processedMessage = preprocessMessage(message);
try { try {
let response; let response;
@@ -538,7 +569,7 @@
headers: { 'Content-Type': 'application/json' }, headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ body: JSON.stringify({
model: aiConfig.model, model: aiConfig.model,
messages: [{ role: 'user', content: message }], messages: [{ role: 'user', content: processedMessage }],
stream: false stream: false
}) })
}); });
@@ -550,6 +581,30 @@
addLine('Error: ' + data.error, 'error'); addLine('Error: ' + data.error, 'error');
} }
} else if (aiConfig.type === 'claude') {
// Claude/Anthropic API
response = await fetch(aiConfig.endpoint, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'x-api-key': aiConfig.apiKey,
'anthropic-version': '2023-06-01',
'anthropic-dangerous-direct-browser-access': 'true'
},
body: JSON.stringify({
model: aiConfig.model,
max_tokens: 1024,
messages: [{ role: 'user', content: processedMessage }]
})
});
const data = await response.json();
if (data.content && data.content[0]) {
addLine(data.content[0].text, 'ai');
} else if (data.error) {
addLine('Error: ' + (data.error.message || data.error), 'error');
}
} else { } else {
// OpenAI-compatible API (LM Studio, GPT4All, OpenAI, etc.) // OpenAI-compatible API (LM Studio, GPT4All, OpenAI, etc.)
const headers = { 'Content-Type': 'application/json' }; const headers = { 'Content-Type': 'application/json' };
@@ -562,7 +617,7 @@
headers: headers, headers: headers,
body: JSON.stringify({ body: JSON.stringify({
model: aiConfig.model, model: aiConfig.model,
messages: [{ role: 'user', content: message }], messages: [{ role: 'user', content: processedMessage }],
stream: false stream: false
}) })
}); });
@@ -602,9 +657,11 @@
addLine('Available commands:', 'system'); addLine('Available commands:', 'system');
addLine(' help - Show this help', 'system'); addLine(' help - Show this help', 'system');
addLine(' config - Configure AI backend', 'system'); addLine(' config - Configure AI backend', 'system');
addLine(' status - Show connection status', 'system');
addLine(' clear - Clear screen', 'system'); addLine(' clear - Clear screen', 'system');
addLine(' date - Show current date/time', 'system'); addLine('', '');
addLine('Keywords (replaced in messages to AI):', 'system');
addLine(' date - Replaced with current date/time', 'system');
addLine(' status - Replaced with connection info', 'system');
addLine('', ''); addLine('', '');
addLine('Any other input is sent to the AI', 'system'); addLine('Any other input is sent to the AI', 'system');
break; break;
@@ -613,23 +670,10 @@
openConfig(); openConfig();
break; break;
case 'status':
addLine('', '');
addLine('CONNECTION STATUS:', 'system');
addLine(' Enabled: ' + (aiConfig.enabled ? 'YES' : 'NO'), 'system');
addLine(' Endpoint: ' + (aiConfig.endpoint || 'Not configured'), 'system');
addLine(' Model: ' + (aiConfig.model || 'Not configured'), 'system');
addLine(' Type: ' + (aiConfig.type || 'unknown'), 'system');
break;
case 'clear': case 'clear':
clearOutput(); clearOutput();
break; break;
case 'date':
addLine(new Date().toString(), 'system');
break;
default: default:
queryAI(trimmed); queryAI(trimmed);
} }