1+ import { setTimeout as sleep } from 'node:timers/promises' ;
12import { slugify } from '../transform/slug.mjs' ;
23
34function normalizeModelResponse ( raw ) {
@@ -74,6 +75,46 @@ function getFinishReason(payload) {
7475 return compactText ( payload ?. choices ?. [ 0 ] ?. finish_reason || '' ) ;
7576}
7677
/**
 * Build an Error annotated with extra diagnostic fields
 * (e.g. code, status, retryable, timeoutMs).
 *
 * @param {string} message - Human-readable error message.
 * @param {object} [details] - Extra properties copied onto the Error instance.
 * @returns {Error} The annotated Error object.
 */
function createAiError(message, details = {}) {
  return Object.assign(new Error(message), details);
}
83+
/**
 * Decide whether an HTTP status code is worth retrying.
 * Retryable: 408 (request timeout), 409 (conflict), 425 (too early),
 * 429 (rate limited), and any server-side status (>= 500).
 *
 * @param {number} status - HTTP status code returned by the AI endpoint.
 * @returns {boolean} True when a later attempt may succeed.
 */
function isRetryableStatus(status) {
  if (status >= 500) {
    return true;
  }
  return [408, 409, 425, 429].includes(status);
}
87+
/**
 * Plan the escalation ladder of AI request attempts:
 *   1) strict JSON-object mode, base token budget, base timeout;
 *   2) free-form output, base token budget, ~1.5x timeout;
 *   3) free-form output, expanded token budget, ~2x timeout.
 *
 * @param {object} options - Pipeline options (`aiTimeoutMs`, `aiMaxTokens`).
 * @returns {Array<{forceJsonObject: boolean, maxTokens: number, timeoutMs: number}>}
 *   Three attempt descriptors, in escalation order.
 */
function buildAttemptPlan(options) {
  const positiveOr = (value, fallback) => (Number(value) > 0 ? Number(value) : fallback);
  const baseTimeoutMs = positiveOr(options.aiTimeoutMs, 180000);
  const baseMaxTokens = positiveOr(options.aiMaxTokens, 8192);
  const generousMaxTokens = Math.max(baseMaxTokens, 12288);

  // Dedupe in case the rounded multiples collapse onto each other
  // (only possible for sub-millisecond base timeouts).
  const timeouts = [
    ...new Set([baseTimeoutMs, Math.round(baseTimeoutMs * 1.5), Math.round(baseTimeoutMs * 2)])
  ];
  const middleTimeoutMs = timeouts[Math.min(1, timeouts.length - 1)];
  const finalTimeoutMs = timeouts.at(-1);

  return [
    { forceJsonObject: true, maxTokens: baseMaxTokens, timeoutMs: timeouts[0] },
    { forceJsonObject: false, maxTokens: baseMaxTokens, timeoutMs: middleTimeoutMs },
    { forceJsonObject: false, maxTokens: generousMaxTokens, timeoutMs: finalTimeoutMs }
  ];
}
112+
/**
 * Exponential backoff delay before the next retry, capped at 10 seconds.
 *
 * @param {object} options - Pipeline options (`aiRetryBackoffMs`, default 1500).
 * @param {number} attemptIndex - Zero-based index of the attempt that just failed.
 * @returns {number} Delay in milliseconds to wait before retrying.
 */
function getRetryDelayMs(options, attemptIndex) {
  const configured = Number(options.aiRetryBackoffMs);
  const baseDelayMs = configured > 0 ? configured : 1500;
  const exponentialMs = baseDelayMs * 2 ** attemptIndex;
  return Math.min(exponentialMs, 10000);
}
117+
77118function buildCategoryGuide ( options ) {
78119 if ( ! options ?. taxonomy || typeof options . taxonomy . buildPromptCategoryGuide !== 'function' ) {
79120 return [ ] ;
@@ -227,20 +268,14 @@ export async function runAiContentPipeline(input, options) {
227268
228269 const endpoint = `${ String ( options . aiBaseUrl || '' ) . replace ( / \/ $ / , '' ) } /chat/completions` ;
229270 const prompt = buildPrompt ( input , options ) ;
230-
231- const requestTimeoutMs = Number ( options . aiTimeoutMs ) > 0 ? Number ( options . aiTimeoutMs ) : 120000 ;
232- const baseMaxTokens = Number ( options . aiMaxTokens ) > 0 ? Number ( options . aiMaxTokens ) : 8192 ;
233- const tokenCandidates = [ ...new Set ( [ baseMaxTokens , Math . max ( baseMaxTokens , 12288 ) ] ) ] ;
234- const attempts = tokenCandidates . flatMap ( ( maxTokens ) => [
235- { maxTokens, forceJsonObject : true } ,
236- { maxTokens, forceJsonObject : false }
237- ] ) ;
271+ const title = compactText ( input ?. title , `document ${ String ( input ?. token || '' ) . slice ( 0 , 8 ) } ` ) ;
272+ const attempts = buildAttemptPlan ( options ) ;
238273
239274 let lastError = null ;
240275 let lastFinishReason = '' ;
241276 let lastPreview = '' ;
242277
243- for ( const attempt of attempts ) {
278+ for ( const [ attemptIndex , attempt ] of attempts . entries ( ) ) {
244279 const requestBody = {
245280 model : options . aiModel ,
246281 temperature : options . aiTemperature ,
@@ -264,7 +299,7 @@ export async function runAiContentPipeline(input, options) {
264299
265300 try {
266301 const controller = new AbortController ( ) ;
267- const timeout = setTimeout ( ( ) => controller . abort ( ) , requestTimeoutMs ) ;
302+ const timeout = setTimeout ( ( ) => controller . abort ( ) , attempt . timeoutMs ) ;
268303 let response ;
269304 try {
270305 response = await fetch ( endpoint , {
@@ -282,7 +317,11 @@ export async function runAiContentPipeline(input, options) {
282317
283318 if ( ! response . ok ) {
284319 const errorText = await response . text ( ) ;
285- throw new Error ( `AI pipeline request failed: ${ response . status } ${ errorText . slice ( 0 , 400 ) } ` ) ;
320+ throw createAiError ( `AI pipeline request failed: ${ response . status } ${ errorText . slice ( 0 , 400 ) } ` , {
321+ code : 'AI_HTTP_ERROR' ,
322+ retryable : isRetryableStatus ( response . status ) ,
323+ status : response . status
324+ } ) ;
286325 }
287326
288327 const payload = await response . json ( ) ;
@@ -305,15 +344,35 @@ export async function runAiContentPipeline(input, options) {
305344 return normalized ;
306345 } catch ( error ) {
307346 if ( error ?. name === 'AbortError' ) {
308- lastError = new Error ( `AI pipeline request timed out after ${ requestTimeoutMs } ms` ) ;
309- continue ;
347+ lastError = createAiError ( `AI pipeline request timed out after ${ attempt . timeoutMs } ms` , {
348+ code : 'AI_TIMEOUT' ,
349+ retryable : true ,
350+ timeoutMs : attempt . timeoutMs
351+ } ) ;
352+ } else {
353+ lastError = error ;
354+ }
355+
356+ const canRetry = attemptIndex < attempts . length - 1 && lastError ?. retryable !== false ;
357+ if ( ! canRetry ) {
358+ break ;
310359 }
311- lastError = error ;
360+
361+ const delayMs = getRetryDelayMs ( options , attemptIndex ) ;
362+ console . warn (
363+ `[feishu-sync] AI retry ${ attemptIndex + 2 } /${ attempts . length } for "${ title } " after ${ lastError . message } (wait ${ delayMs } ms)`
364+ ) ;
365+ await sleep ( delayMs ) ;
312366 }
313367 }
314368
315369 const finishReasonPart = lastFinishReason ? ` finish_reason=${ lastFinishReason } .` : '' ;
316370 const previewPart = lastPreview ? ` preview=${ JSON . stringify ( lastPreview ) } .` : '' ;
317371 const errorMessage = lastError ?. message || 'AI pipeline failed after retries' ;
318- throw new Error ( `${ errorMessage } .${ finishReasonPart } ${ previewPart } ` ) ;
372+ throw createAiError ( `${ errorMessage } .${ finishReasonPart } ${ previewPart } ` , {
373+ code : lastError ?. code || 'AI_PIPELINE_FAILED' ,
374+ retryable : lastError ?. retryable !== false ,
375+ status : lastError ?. status ,
376+ timeoutMs : lastError ?. timeoutMs
377+ } ) ;
319378}
0 commit comments