diff --git a/server/src/routes/generate.ts b/server/src/routes/generate.ts
index 59d4cc0..a0d96af 100644
--- a/server/src/routes/generate.ts
+++ b/server/src/routes/generate.ts
@@ -799,7 +799,7 @@ router.post('/format', authMiddleware, async (req: AuthenticatedRequest, res: Re
   if (timeSignature) reqJson.timesignature = timeSignature;
   await wf(reqPath, JSON.stringify(reqJson, null, 2));
 
-  const args: string[] = ['--request', reqPath, '--model', config.acestep.lmModel];
+  const args: string[] = ['--request', reqPath, '--lm', config.acestep.lmModel];
   const { spawn } = await import('child_process');
   const result = await new Promise<{ stdout: string; stderr: string; code: number }>((resolve) => {
     const proc = spawn(config.acestep.lmBin!, args, { shell: false, stdio: ['ignore', 'pipe', 'pipe'] });
diff --git a/server/src/services/acestep.ts b/server/src/services/acestep.ts
index 47e2ca0..238bc46 100644
--- a/server/src/services/acestep.ts
+++ b/server/src/services/acestep.ts
@@ -649,7 +649,7 @@ async function runViaSpawn(
   const lmModel = config.acestep.lmModel;
   if (!lmModel) throw new Error('LM model not found — run models.sh first');
 
-  const lmArgs: string[] = ['--request', requestPath, '--model', lmModel];
+  const lmArgs: string[] = ['--request', requestPath, '--lm', lmModel];
   const batchSize = Math.min(Math.max(params.batchSize ?? 1, 1), 8);
   if (batchSize > 1) lmArgs.push('--batch', String(batchSize));
 
@@ -717,7 +717,7 @@ async function runViaSpawn(
   const ditArgs: string[] = [
     '--request',
     ...enrichedPaths,
-    '--text-encoder', textEncoderModel,
+    '--embedding', textEncoderModel,
     '--dit', ditModel,
     '--vae', vaeModel,
   ];
@@ -1209,7 +1209,7 @@ export async function runUnderstand(audioUrl: string): Promise
     '--src-audio', srcAudioPath,
     '--dit', ditModel,
     '--vae', vaeModel,
-    '--model', lmModel,
+    '--lm', lmModel,
     '-o', outJsonPath,
   ];
 
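Note: for context, a minimal sketch of the invocation the LM step builds after this rename. The binary and file paths below are placeholders for illustration only; in the server they come from config.acestep (lmBin, lmModel), as in the hunks above.

    import { spawn } from 'child_process';

    // Placeholder paths, not values from the repo.
    const lmBin = '/opt/acestep/bin/lm';
    const requestPath = '/tmp/request.json';
    const lmModel = '/models/acestep-lm';

    // The LM binary now takes '--lm' where it previously took '--model'.
    const args: string[] = ['--request', requestPath, '--lm', lmModel];

    const proc = spawn(lmBin, args, { shell: false, stdio: ['ignore', 'pipe', 'pipe'] });
    proc.on('close', (code) => console.log(`lm exited with code ${code}`));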