patch: implement @lunamidori hotfix for LocalAI streaming chunk overflows (#433)
* patch: implement @lunamidori hotfix for LocalAI streaming chunk overflows resolves #416 * change log to error log * log trace * lint
This commit is contained in:
parent
d4f4d85492
commit
37cdb845a4
@@ -253,7 +253,16 @@ function handleStreamResponses(response, stream, responseProps) {
       } catch {}

       if (!validJSON) {
-        chunk += message;
+        // It can be possible that the chunk decoding is running away
+        // and the message chunk fails to append due to string length.
+        // In this case abort the chunk and reset so we can continue.
+        // ref: https://github.com/Mintplex-Labs/anything-llm/issues/416
+        try {
+          chunk += message;
+        } catch (e) {
+          console.error(`Chunk appending error`, e);
+          chunk = "";
+        }
         continue;
       } else {
         chunk = "";
||||
Loading…
Reference in New Issue
Block a user