feat: migrate npm registry from Verdaccio to Gitea Packages
Some checks failed
Monorepo Pipeline / ⚡ Prioritize Release (push) Successful in 1s
Monorepo Pipeline / 🧹 Lint (push) Failing after 35s
Monorepo Pipeline / 🧪 Test (push) Failing after 35s
Monorepo Pipeline / 🏗️ Build (push) Failing after 12s
Monorepo Pipeline / 🚀 Release (push) Has been skipped
Monorepo Pipeline / 🐳 Build Image Processor (push) Has been skipped
Monorepo Pipeline / 🐳 Build Directus (Base) (push) Has been skipped
Monorepo Pipeline / 🐳 Build Gatekeeper (Product) (push) Has been skipped
Monorepo Pipeline / 🐳 Build Build-Base (push) Has been skipped
Monorepo Pipeline / 🐳 Build Production Runtime (push) Has been skipped

This commit is contained in:
2026-02-27 00:12:00 +01:00
parent efd1341762
commit 5da88356a8
69 changed files with 5397 additions and 114 deletions

View File

@@ -77,7 +77,7 @@ export class ResearchAgent {
// but formatted as "facts".
const synthesis = await this.openai.chat.completions.create({
-model: "google/gemini-2.0-flash-001",
+model: "google/gemini-3-flash-preview",
messages: [
{
role: "system",
@@ -186,7 +186,7 @@ Return JSON: { "facts": [ { "statement": "...", "source": "Organization Name Onl
// Step 1: Ask the LLM to generate a highly specific YouTube search query
// We want tutorials, explanations, or deep dives.
const queryGen = await this.openai.chat.completions.create({
-model: "google/gemini-2.5-flash",
+model: "google/gemini-3-flash-preview",
messages: [
{
role: "system",
@@ -262,7 +262,7 @@ RULES:
Return ONLY a JSON object: {"bestVideoId": number}`;
const evalResponse = await this.openai.chat.completions.create({
-model: "google/gemini-2.5-flash",
+model: "google/gemini-3-flash-preview",
messages: [{ role: "system", content: evalPrompt }],
response_format: { type: "json_object" },
});
@@ -320,7 +320,7 @@ Return ONLY a JSON object: {"bestVideoId": number}`;
topic: string,
): Promise<{ trendsKeywords: string[]; dcVariables: string[] }> {
const response = await this.openai.chat.completions.create({
-model: "google/gemini-2.0-flash-001",
+model: "google/gemini-3-flash-preview",
messages: [
{
role: "system",
@@ -378,7 +378,7 @@ CRITICAL: Do NOT provide more than 2 trendsKeywords. Keep it extremely focused.`
// Step 1: LLM generates the optimal Google Search query
const queryGen = await this.openai.chat.completions.create({
-model: "google/gemini-2.5-flash",
+model: "google/gemini-3-flash-preview",
messages: [
{
role: "system",

View File

@@ -34,7 +34,7 @@ export class TrendsClient {
);
try {
const response = await this.openai.chat.completions.create({
-model: "google/gemini-2.5-flash",
+model: "google/gemini-3-flash-preview",
messages: [
{
role: "system",