feat: migrate npm registry from Verdaccio to Gitea Packages
Some checks failed
Monorepo Pipeline / ⚡ Prioritize Release (push) Successful in 1s
Monorepo Pipeline / 🧹 Lint (push) Failing after 35s
Monorepo Pipeline / 🧪 Test (push) Failing after 35s
Monorepo Pipeline / 🏗️ Build (push) Failing after 12s
Monorepo Pipeline / 🚀 Release (push) Has been skipped
Monorepo Pipeline / 🐳 Build Image Processor (push) Has been skipped
Monorepo Pipeline / 🐳 Build Directus (Base) (push) Has been skipped
Monorepo Pipeline / 🐳 Build Gatekeeper (Product) (push) Has been skipped
Monorepo Pipeline / 🐳 Build Build-Base (push) Has been skipped
Monorepo Pipeline / 🐳 Build Production Runtime (push) Has been skipped
133 packages/concept-engine/src/llm-client.ts Normal file
@@ -0,0 +1,133 @@
// ============================================================================
// LLM Client — Unified interface with model routing via OpenRouter
// ============================================================================

import axios from "axios";

interface LLMRequestOptions {
  model: string;
  systemPrompt: string;
  userPrompt: string;
  jsonMode?: boolean;
  apiKey: string;
}

interface LLMResponse {
  content: string;
  usage: {
    promptTokens: number;
    completionTokens: number;
    cost: number;
  };
}

/**
 * Clean raw LLM output to parseable JSON.
 * Handles markdown fences, control chars, trailing commas.
 */
export function cleanJson(str: string): string {
  let cleaned = str.replace(/```json\n?|```/g, "").trim();
  cleaned = cleaned.replace(
    /[\u0000-\u0009\u000B\u000C\u000E-\u001F\u007F-\u009F]/g,
    " ",
  );
  cleaned = cleaned.replace(/,\s*([\]}])/g, "$1");
  return cleaned;
}

/**
 * Send a request to an LLM via OpenRouter.
 */
export async function llmRequest(options: LLMRequestOptions): Promise<LLMResponse> {
  const { model, systemPrompt, userPrompt, jsonMode = true, apiKey } = options;

  const startTime = Date.now();

  const resp = await axios.post(
    "https://openrouter.ai/api/v1/chat/completions",
    {
      model,
      messages: [
        { role: "system", content: systemPrompt },
        { role: "user", content: userPrompt },
      ],
      ...(jsonMode ? { response_format: { type: "json_object" } } : {}),
    },
    {
      headers: {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json",
      },
      timeout: 120000,
    },
  ).catch(err => {
    if (err.response) {
      console.error("OpenRouter API Error:", JSON.stringify(err.response.data, null, 2));
    }
    throw err;
  });

  const content = resp.data.choices?.[0]?.message?.content;
  if (!content) {
    throw new Error(`LLM returned no content. Model: ${model}`);
  }

  let cost = 0;
  const usage = resp.data.usage || {};
  if (usage.cost !== undefined) {
    cost = usage.cost;
  } else {
    // Fallback estimation
    cost =
      (usage.prompt_tokens || 0) * (0.1 / 1_000_000) +
      (usage.completion_tokens || 0) * (0.4 / 1_000_000);
  }

  return {
    content,
    usage: {
      promptTokens: usage.prompt_tokens || 0,
      completionTokens: usage.completion_tokens || 0,
      cost,
    },
  };
}

/**
 * Send a request and parse the response as JSON.
 */
export async function llmJsonRequest<T = any>(
  options: LLMRequestOptions,
): Promise<{ data: T; usage: LLMResponse["usage"] }> {
  const response = await llmRequest({ ...options, jsonMode: true });
  const cleaned = cleanJson(response.content);

  let parsed: T;
  try {
    parsed = JSON.parse(cleaned);
  } catch (e) {
    throw new Error(
      `Failed to parse LLM JSON response: ${(e as Error).message}\nRaw: ${cleaned.substring(0, 500)}`,
    );
  }

  // Unwrap common LLM artifacts: {"0": {...}}, {"state": {...}}, etc.
  const unwrapped = unwrapResponse(parsed);

  return { data: unwrapped as T, usage: response.usage };
}

/**
 * Recursively unwrap common LLM wrapping patterns.
 */
function unwrapResponse(obj: any): any {
  if (!obj || typeof obj !== "object" || Array.isArray(obj)) return obj;
  const keys = Object.keys(obj);
  if (keys.length === 1) {
    const key = keys[0];
    if (key === "0" || key === "state" || key === "facts" || key === "result" || key === "data") {
      return unwrapResponse(obj[key]);
    }
  }
  return obj;
}
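
For reviewers, a minimal usage sketch of the new client follows. The model ID, prompts, and the OPENROUTER_API_KEY variable are illustrative assumptions, not values taken from this repo:

import { llmJsonRequest } from "./llm-client";

async function demo(): Promise<void> {
  // Because unwrapResponse() strips a lone "facts" key, asking the model
  // for {"facts": [...]} yields the bare array as `data`.
  const { data, usage } = await llmJsonRequest<string[]>({
    model: "openai/gpt-4o-mini", // hypothetical OpenRouter model ID
    systemPrompt: 'Reply with JSON: {"facts": string[]}',
    userPrompt: "List two facts about npm registries.",
    apiKey: process.env.OPENROUTER_API_KEY ?? "", // assumed env var name
  });
  console.log(data, `cost: $${usage.cost.toFixed(6)}`);
}

void demo();

Note the fallback pricing in llmRequest: it assumes $0.10 per million prompt tokens and $0.40 per million completion tokens, so a call with 1,000 prompt and 500 completion tokens is estimated at $0.0003 whenever OpenRouter omits usage.cost.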
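
A quick sketch of what the sanitizing helpers tolerate; the raw string below is a made-up specimen of typical model output, not a captured response:

import { cleanJson } from "./llm-client";

// Fenced JSON with a trailing comma, as models often emit it:
const raw = '```json\n{"state": {"ok": true,}}\n```';
console.log(JSON.parse(cleanJson(raw))); // -> { state: { ok: true } }
// Via llmJsonRequest, unwrapResponse would reduce this further to { ok: true },
// since "state" is one of the recognized single-key wrappers.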