161 lines
5.4 KiB
TypeScript
161 lines
5.4 KiB
TypeScript
import { NextResponse } from 'next/server'
|
|
import OpenAI from 'openai'
|
|
|
|
// The two chat-completion backends this route can call (see createClient()).
type LlmProvider = 'openai' | 'openrouter'
|
|
|
|
function getProvider(): LlmProvider {
|
|
const configured = (process.env.LLM_PROVIDER ?? '').toLowerCase()
|
|
if (configured === 'openrouter') return 'openrouter'
|
|
if (configured === 'openai') return 'openai'
|
|
return process.env.OPENROUTER_API_KEY ? 'openrouter' : 'openai'
|
|
}
|
|
|
|
function createClient(provider: LlmProvider) {
|
|
if (provider === 'openrouter') {
|
|
const apiKey = process.env.OPENROUTER_API_KEY || ''
|
|
return new OpenAI({
|
|
apiKey,
|
|
baseURL: process.env.OPENROUTER_BASE_URL || 'https://openrouter.ai/api/v1',
|
|
defaultHeaders: {
|
|
...(process.env.OPENROUTER_SITE_URL
|
|
? { 'HTTP-Referer': process.env.OPENROUTER_SITE_URL }
|
|
: {}),
|
|
...(process.env.OPENROUTER_APP_NAME
|
|
? { 'X-Title': process.env.OPENROUTER_APP_NAME }
|
|
: {}),
|
|
},
|
|
})
|
|
}
|
|
|
|
return new OpenAI({
|
|
apiKey: process.env.OPENAI_API_KEY || '',
|
|
})
|
|
}
|
|
|
|
function getModel(provider: LlmProvider): string {
|
|
if (provider === 'openrouter') {
|
|
return process.env.OPENROUTER_MODEL || 'minimax/minimax-m2.5'
|
|
}
|
|
return process.env.OPENAI_MODEL || 'gpt-5-mini'
|
|
}
|
|
|
|
function hasApiKey(provider: LlmProvider): boolean {
|
|
if (provider === 'openrouter') return !!process.env.OPENROUTER_API_KEY
|
|
return !!process.env.OPENAI_API_KEY
|
|
}
|
|
|
|
async function generateText({
|
|
provider,
|
|
model,
|
|
systemMessage,
|
|
prompt,
|
|
}: {
|
|
provider: LlmProvider
|
|
model: string
|
|
systemMessage: string
|
|
prompt: string
|
|
}) {
|
|
const client = createClient(provider)
|
|
const completion = await client.chat.completions.create({
|
|
model,
|
|
messages: [
|
|
{ role: 'system', content: systemMessage },
|
|
{ role: 'user', content: prompt },
|
|
],
|
|
})
|
|
|
|
return completion.choices[0]?.message?.content || ''
|
|
}
|
|
|
|
export async function POST(req: Request) {
|
|
try {
|
|
const { prompt, type, format } = await req.json()
|
|
const primaryProvider = getProvider()
|
|
const primaryModel = getModel(primaryProvider)
|
|
|
|
if (!prompt) {
|
|
return NextResponse.json({ error: 'Prompt is required' }, { status: 400 })
|
|
}
|
|
|
|
let systemMessage = ''
|
|
|
|
if (type === 'news') {
|
|
systemMessage = `Du bist ein erfahrener Newsletter- und PR-Experte für eine Innung (Handwerksverband).
|
|
Deine Aufgabe ist es, professionelle, ansprechende und informative News-Beiträge zu schreiben.
|
|
Achte auf eine klare Struktur, eine einladende Tonalität und hohe inhaltliche Qualität.
|
|
Das gewünschte Ausgabeformat ist: ${format === 'markdown' ? 'Markdown' : 'Einfacher unformatierter Text'}.`
|
|
} else if (type === 'stelle') {
|
|
systemMessage = `Du bist ein erfahrener HR- und Recruiting-Experte für das Handwerk.
|
|
Deine Aufgabe ist es, attraktive und präzise Stellenanzeigen (Lehrlingsbörse / Jobbörse) zu verfassen.
|
|
Die Stellenanzeige soll Begeisterung wecken und klar die Aufgaben sowie Anforderungen kommunizieren.
|
|
Das gewünschte Ausgabeformat ist: ${format === 'markdown' ? 'Markdown' : 'Einfacher unformatierter Text'}.`
|
|
} else {
|
|
systemMessage = `Du bist ein hilfreicher KI-Assistent. Antworte immer auf Deutsch.`
|
|
}
|
|
|
|
const attempts: Array<{ provider: LlmProvider; model: string; reason: string }> = []
|
|
|
|
if (hasApiKey(primaryProvider)) {
|
|
attempts.push({
|
|
provider: primaryProvider,
|
|
model: primaryModel,
|
|
reason: 'primary',
|
|
})
|
|
}
|
|
|
|
// Fallback requested: if primary fails, try OpenAI GPT-5 mini when OPENAI_API_KEY is present.
|
|
if (primaryProvider !== 'openai' && hasApiKey('openai')) {
|
|
attempts.push({
|
|
provider: 'openai',
|
|
model: 'gpt-5-mini',
|
|
reason: 'fallback_openai',
|
|
})
|
|
}
|
|
|
|
if (attempts.length === 0) {
|
|
return NextResponse.json(
|
|
{ error: 'No AI provider key configured (OPENROUTER_API_KEY or OPENAI_API_KEY).' },
|
|
{ status: 500 }
|
|
)
|
|
}
|
|
|
|
let lastError: any = null
|
|
|
|
for (const attempt of attempts) {
|
|
try {
|
|
const text = await generateText({
|
|
provider: attempt.provider,
|
|
model: attempt.model,
|
|
systemMessage,
|
|
prompt,
|
|
})
|
|
|
|
return NextResponse.json({
|
|
text,
|
|
provider: attempt.provider,
|
|
model: attempt.model,
|
|
fallbackUsed: attempt.reason !== 'primary',
|
|
})
|
|
} catch (error: any) {
|
|
lastError = error
|
|
console.error('AI attempt failed:', {
|
|
provider: attempt.provider,
|
|
model: attempt.model,
|
|
message: error?.message,
|
|
})
|
|
}
|
|
}
|
|
|
|
return NextResponse.json(
|
|
{ error: lastError?.message || 'All AI providers failed' },
|
|
{ status: 500 }
|
|
)
|
|
} catch (error: any) {
|
|
console.error('AI Generate Error:', error)
|
|
return NextResponse.json(
|
|
{ error: error?.message || 'Internal Server Error' },
|
|
{ status: 500 }
|
|
)
|
|
}
|
|
}
|