feat(content-engine): enhance content pruning rule in orchestrator
Some checks failed
Monorepo Pipeline / ⚡ Prioritize Release (push) Successful in 2s
Monorepo Pipeline / 🏗️ Build (push) Has been cancelled
Monorepo Pipeline / 🚀 Release (push) Has been cancelled
Monorepo Pipeline / 🐳 Build Directus (Base) (push) Has been cancelled
Monorepo Pipeline / 🧹 Lint (push) Has been cancelled
Monorepo Pipeline / 🧪 Test (push) Has been cancelled
Monorepo Pipeline / 🐳 Build Gatekeeper (Product) (push) Has been cancelled
Monorepo Pipeline / 🐳 Build Build-Base (push) Has been cancelled
Monorepo Pipeline / 🐳 Build Production Runtime (push) Has been cancelled

This commit is contained in:
2026-02-22 18:53:17 +01:00
parent baecc9c83c
commit b3d089ac6d
10 changed files with 830 additions and 114 deletions

View File

@@ -0,0 +1,139 @@
import * as faceapi from '@vladmandic/face-api';
// Provide Canvas fallback for face-api in Node.js
import { Canvas, Image, ImageData } from 'canvas';
import sharp from 'sharp';
import * as path from 'node:path';
import { fileURLToPath } from 'node:url';
// @ts-ignore -- face-api's env typings expect the browser DOM constructors;
// the node-canvas implementations are structurally compatible at runtime.
faceapi.env.monkeyPatch({ Canvas, Image, ImageData });
// Recreate CommonJS-style __filename/__dirname in this ES module.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Path to the downloaded models (sibling `models` directory of this file's parent)
const MODELS_PATH = path.join(__dirname, '..', 'models');
// True once the TinyFaceDetector weights have finished loading.
let isModelsLoaded = false;
// In-flight load, shared so concurrent callers don't start a second disk load.
let modelsLoading: Promise<void> | null = null;

/**
 * Lazily load the TinyFaceDetector weights from disk.
 *
 * Safe to call repeatedly and concurrently: the first caller starts the load
 * and every overlapping caller awaits the same promise. A failed load resets
 * the cached promise so the next call can retry.
 */
async function loadModels(): Promise<void> {
  if (isModelsLoaded) return;
  if (!modelsLoading) {
    modelsLoading = faceapi.nets.tinyFaceDetector
      .loadFromDisk(MODELS_PATH)
      .then(() => {
        isModelsLoaded = true;
      })
      .catch((err: unknown) => {
        // Allow a retry after a failure instead of caching the rejection.
        modelsLoading = null;
        throw err;
      });
  }
  await modelsLoading;
}
/**
 * Output options for the smart-crop image pipeline.
 */
export interface ProcessImageOptions {
  /** Target output width in pixels. */
  width: number;
  /** Target output height in pixels. */
  height: number;
  /** Output encoding; defaults to 'webp' when omitted. */
  format?: 'webp' | 'jpeg' | 'png' | 'avif';
  /** Encoder quality; defaults to 80 when omitted. */
  quality?: number;
}
export async function processImageWithSmartCrop(
inputBuffer: Buffer,
options: ProcessImageOptions
): Promise<Buffer> {
await loadModels();
// Load image via Canvas for face-api
const img = new Image();
img.src = inputBuffer;
// Detect faces
const detections = await faceapi.detectAllFaces(
// @ts-ignore
img,
new faceapi.TinyFaceDetectorOptions()
);
const sharpImage = sharp(inputBuffer);
const metadata = await sharpImage.metadata();
if (!metadata.width || !metadata.height) {
throw new Error('Could not read image metadata');
}
// If faces are found, calculate the bounding box containing all faces
if (detections.length > 0) {
let minX = metadata.width;
let minY = metadata.height;
let maxX = 0;
let maxY = 0;
for (const det of detections) {
const { x, y, width, height } = det.box;
if (x < minX) minX = Math.max(0, x);
if (y < minY) minY = Math.max(0, y);
if (x + width > maxX) maxX = Math.min(metadata.width, x + width);
if (y + height > maxY) maxY = Math.min(metadata.height, y + height);
}
const faceBoxWidth = maxX - minX;
const faceBoxHeight = maxY - minY;
// Calculate center of the faces
const centerX = Math.floor(minX + faceBoxWidth / 2);
const centerY = Math.floor(minY + faceBoxHeight / 2);
// Provide this as a focus point for sharp's extract or resize
// We can use sharp's resize with `position` focusing on crop options,
// or calculate an exact bounding box. However, extracting an exact bounding box
// and then resizing usually yields the best results when focusing on a specific coordinate.
// A simpler approach is to crop a rectangle with the target aspect ratio
// centered on the faces, then resize. Let's calculate the crop box.
const targetRatio = options.width / options.height;
const currentRatio = metadata.width / metadata.height;
let cropWidth = metadata.width;
let cropHeight = metadata.height;
if (currentRatio > targetRatio) {
// Image is wider than target, calculate new width
cropWidth = Math.floor(metadata.height * targetRatio);
} else {
// Image is taller than target, calculate new height
cropHeight = Math.floor(metadata.width / targetRatio);
}
// Try to center the crop box around the faces
let cropX = Math.floor(centerX - cropWidth / 2);
let cropY = Math.floor(centerY - cropHeight / 2);
// Keep crop box within image bounds
if (cropX < 0) cropX = 0;
if (cropY < 0) cropY = 0;
if (cropX + cropWidth > metadata.width) cropX = metadata.width - cropWidth;
if (cropY + cropHeight > metadata.height) cropY = metadata.height - cropHeight;
sharpImage.extract({
left: cropX,
top: cropY,
width: cropWidth,
height: cropHeight
});
}
// Finally, resize to the requested dimensions and format
let finalImage = sharpImage.resize(options.width, options.height, {
// If faces weren't found, default to entropy/attention based cropping as fallback
fit: 'cover',
position: detections.length > 0 ? 'center' : 'attention'
});
const format = options.format || 'webp';
const quality = options.quality || 80;
if (format === 'webp') {
finalImage = finalImage.webp({ quality });
} else if (format === 'jpeg') {
finalImage = finalImage.jpeg({ quality });
} else if (format === 'png') {
finalImage = finalImage.png({ quality });
} else if (format === 'avif') {
finalImage = finalImage.avif({ quality });
}
return finalImage.toBuffer();
}