Update gpt/index.html
This commit is contained in:
+65
-21
@@ -336,7 +336,7 @@
|
||||
</div>
|
||||
|
||||
<div class="footer">
|
||||
<span>COMMANDS: help | config | clear | status</span>
|
||||
<span>COMMANDS: help | config | clear</span>
|
||||
<span id="timeDisplay"></span>
|
||||
</div>
|
||||
</div>
|
||||
@@ -353,6 +353,7 @@
|
||||
<option value="lmstudio">LM Studio (localhost:1234)</option>
|
||||
<option value="gpt4all">GPT4All (localhost:4891)</option>
|
||||
<option value="openai">OpenAI API</option>
|
||||
<option value="claude">Claude API (Anthropic)</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
@@ -368,7 +369,7 @@
|
||||
|
||||
<div class="config-group">
|
||||
<label>API KEY (optional)</label>
|
||||
<input type="password" id="apiKeyInput" placeholder="Leave empty for local models">
|
||||
<input type="text" id="apiKeyInput" placeholder="Leave empty for local models" autocomplete="off" data-form-type="other">
|
||||
</div>
|
||||
|
||||
<div class="config-buttons">
|
||||
@@ -501,7 +502,8 @@
|
||||
ollama: { endpoint: 'http://localhost:11434/api/chat', model: 'llama2', type: 'ollama' },
|
||||
lmstudio: { endpoint: 'http://localhost:1234/v1/chat/completions', model: 'local-model', type: 'openai' },
|
||||
gpt4all: { endpoint: 'http://localhost:4891/v1/chat/completions', model: 'gpt4all-model', type: 'openai' },
|
||||
openai: { endpoint: 'https://api.openai.com/v1/chat/completions', model: 'gpt-3.5-turbo', type: 'openai' }
|
||||
openai: { endpoint: 'https://api.openai.com/v1/chat/completions', model: 'gpt-4o-mini', type: 'openai' },
|
||||
claude: { endpoint: 'https://api.anthropic.com/v1/messages', model: 'claude-sonnet-4-20250514', type: 'claude' }
|
||||
};
|
||||
|
||||
const preset = presets[this.value];
|
||||
@@ -512,6 +514,32 @@
|
||||
}
|
||||
});
|
||||
|
||||
/* ============================================
|
||||
HELPER FUNCTIONS
|
||||
============================================ */
|
||||
/**
 * Build a timestamp string for keyword substitution in outgoing messages.
 * Format: DD/MM/YYYY:HH/MM/SS, local time, zero-padded two-digit fields.
 * NOTE(review): slashes between the time fields (HH/MM/SS) look unusual —
 * confirm this separator choice is intentional.
 */
function getFormattedDate() {
    const pad2 = (v) => String(v).padStart(2, '0');
    const now = new Date();
    const datePart = [pad2(now.getDate()), pad2(now.getMonth() + 1), now.getFullYear()].join('/');
    const timePart = [pad2(now.getHours()), pad2(now.getMinutes()), pad2(now.getSeconds())].join('/');
    return `${datePart}:${timePart}`;
}
|
||||
|
||||
/**
 * Summarize the current AI connection configuration as a bracketed string.
 * Reads the module-level `aiConfig` object; falsy endpoint/model values
 * are reported as 'none'.
 */
function getStatusInfo() {
    const connection = aiConfig.enabled ? 'ONLINE' : 'OFFLINE';
    const endpoint = aiConfig.endpoint || 'none';
    const model = aiConfig.model || 'none';
    return `[Connection: ${connection}, Endpoint: ${endpoint}, Model: ${model}]`;
}
|
||||
|
||||
/**
 * Expand the "date" and "status" keywords in a user message before it is
 * sent to the AI backend. Matching is case-insensitive and whole-word only;
 * the inserted keyword text is always lowercase regardless of matched case.
 */
function preprocessMessage(message) {
    // Whole-word "date" -> "date (<timestamp>)"
    const withDate = message.replace(/\bdate\b/gi, `date (${getFormattedDate()})`);
    // Whole-word "status" -> "status [<connection info>]"
    return withDate.replace(/\bstatus\b/gi, `status ${getStatusInfo()}`);
}
|
||||
|
||||
/* ============================================
|
||||
AI COMMUNICATION
|
||||
============================================ */
|
||||
@@ -523,11 +551,14 @@
|
||||
addLine('To connect an AI backend:', 'system');
|
||||
addLine(' 1. Type "config" to open settings', 'system');
|
||||
addLine(' 2. Choose a preset or enter custom endpoint', 'system');
|
||||
addLine(' 3. Supported: Ollama, LM Studio, GPT4All, OpenAI', 'system');
|
||||
addLine(' 3. Supported: Ollama, LM Studio, GPT4All, OpenAI, Claude', 'system');
|
||||
return;
|
||||
}
|
||||
|
||||
isProcessing = true;
|
||||
|
||||
// Preprocess message to replace date/status keywords
|
||||
const processedMessage = preprocessMessage(message);
|
||||
|
||||
try {
|
||||
let response;
|
||||
@@ -538,7 +569,7 @@
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
model: aiConfig.model,
|
||||
messages: [{ role: 'user', content: message }],
|
||||
messages: [{ role: 'user', content: processedMessage }],
|
||||
stream: false
|
||||
})
|
||||
});
|
||||
@@ -550,6 +581,30 @@
|
||||
addLine('Error: ' + data.error, 'error');
|
||||
}
|
||||
|
||||
} else if (aiConfig.type === 'claude') {
|
||||
// Claude/Anthropic API
|
||||
response = await fetch(aiConfig.endpoint, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': aiConfig.apiKey,
|
||||
'anthropic-version': '2023-06-01',
|
||||
'anthropic-dangerous-direct-browser-access': 'true'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: aiConfig.model,
|
||||
max_tokens: 1024,
|
||||
messages: [{ role: 'user', content: processedMessage }]
|
||||
})
|
||||
});
|
||||
|
||||
const data = await response.json();
|
||||
if (data.content && data.content[0]) {
|
||||
addLine(data.content[0].text, 'ai');
|
||||
} else if (data.error) {
|
||||
addLine('Error: ' + (data.error.message || data.error), 'error');
|
||||
}
|
||||
|
||||
} else {
|
||||
// OpenAI-compatible API (LM Studio, GPT4All, OpenAI, etc.)
|
||||
const headers = { 'Content-Type': 'application/json' };
|
||||
@@ -562,7 +617,7 @@
|
||||
headers: headers,
|
||||
body: JSON.stringify({
|
||||
model: aiConfig.model,
|
||||
messages: [{ role: 'user', content: message }],
|
||||
messages: [{ role: 'user', content: processedMessage }],
|
||||
stream: false
|
||||
})
|
||||
});
|
||||
@@ -602,9 +657,11 @@
|
||||
addLine('Available commands:', 'system');
|
||||
addLine(' help - Show this help', 'system');
|
||||
addLine(' config - Configure AI backend', 'system');
|
||||
addLine(' status - Show connection status', 'system');
|
||||
addLine(' clear - Clear screen', 'system');
|
||||
addLine(' date - Show current date/time', 'system');
|
||||
addLine('', '');
|
||||
addLine('Keywords (replaced in messages to AI):', 'system');
|
||||
addLine(' date - Replaced with current date/time', 'system');
|
||||
addLine(' status - Replaced with connection info', 'system');
|
||||
addLine('', '');
|
||||
addLine('Any other input is sent to the AI', 'system');
|
||||
break;
|
||||
@@ -613,23 +670,10 @@
|
||||
openConfig();
|
||||
break;
|
||||
|
||||
case 'status':
|
||||
addLine('', '');
|
||||
addLine('CONNECTION STATUS:', 'system');
|
||||
addLine(' Enabled: ' + (aiConfig.enabled ? 'YES' : 'NO'), 'system');
|
||||
addLine(' Endpoint: ' + (aiConfig.endpoint || 'Not configured'), 'system');
|
||||
addLine(' Model: ' + (aiConfig.model || 'Not configured'), 'system');
|
||||
addLine(' Type: ' + (aiConfig.type || 'unknown'), 'system');
|
||||
break;
|
||||
|
||||
case 'clear':
|
||||
clearOutput();
|
||||
break;
|
||||
|
||||
case 'date':
|
||||
addLine(new Date().toString(), 'system');
|
||||
break;
|
||||
|
||||
default:
|
||||
queryAI(trimmed);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user