* Create parse endpoint in collector (#4212) * create parse endpoint in collector * revert cleanup temp util call * lint * remove unused cleanupTempDocuments function * revert slug change minor change for destinations --------- Co-authored-by: timothycarambat <rambat1010@gmail.com> * Add parsed files table and parse server endpoints (#4222) * add workspace_parsed_files table + parse endpoints/models * remove dev api parse endpoint * remove unneeded imports * iterate over all files + remove unneeded update function + update telemetry debounce * Upload UI/UX context window check + frontend alert (#4230) * prompt user to embed if exceeds prompt window + handle embed + handle cancel * add tokenCountEstimate to workspace_parsed_files + optimizations * use util for path locations + use safeJsonParse * add modal for user decision on overflow of context window * lint * dynamic fetching of provider/model combo + inject parsed documents * remove unneeded comments * popup ui for attaching/removing files + warning to embed + wip fetching states on update * remove prop drilling, fetch files/limits directly in attach files popup * rework ux of FE + BE optimizations * fix ux of FE + BE optimizations * Implement bidirectional sync for parsed file states linting small changes and comments * move parse support to another endpoint file simplify calls and loading of records * button borders * enable default users to upload parsed files but NOT embed * delete cascade on user/workspace/thread deletion to remove parsedFileRecord * enable bgworker with "always" jobs and optional document sync jobs orphan document job: Will find any broken reference files to prevent overpollution of the storage folder. 
This will run 10s after boot and every 12hr after * change run timeout for orphan job to 1m to allow settling before spawning a worker * linting and cleanup pr --------- Co-authored-by: Timothy Carambat <rambat1010@gmail.com> * dev build * fix tooltip hiding during embedding overflow files * prevent crash log from ERRNO on parse files * unused import * update docs link * Migrate parsed-files to GET endpoint patch logic for grabbing model names from utils better handling for undetermined context windows (null instead of POSITIVE_INFINITY) UI placeholder for null context windows * patch URL --------- Co-authored-by: Sean Hatfield <seanhatfield5@gmail.com>
75 lines
2.1 KiB
JavaScript
75 lines
2.1 KiB
JavaScript
const { v4 } = require("uuid");
|
|
const {
|
|
createdDate,
|
|
trashFile,
|
|
writeToServerDocuments,
|
|
} = require("../../utils/files");
|
|
const { tokenizeString } = require("../../utils/tokenizer");
|
|
const { default: slugify } = require("slugify");
|
|
const { LocalWhisper } = require("../../utils/WhisperProviders/localWhisper");
|
|
const { OpenAiWhisper } = require("../../utils/WhisperProviders/OpenAiWhisper");
|
|
|
|
// Registry of Whisper transcription providers, keyed by the value accepted
// in `options.whisperProvider`. `local` is the fallback used by asAudio when
// no (or an unknown) provider key is supplied.
const WHISPER_PROVIDERS = {
  local: LocalWhisper,
  openai: OpenAiWhisper,
};
|
|
|
|
/**
 * Transcribe an uploaded audio file via a Whisper provider, convert the
 * transcript into a server document record, and remove the temp upload.
 *
 * @param {Object} args
 * @param {string} args.fullFilePath - Absolute path to the uploaded audio file.
 * @param {string} args.filename - Original name of the uploaded file.
 * @param {Object} args.options - Parse options; may include `whisperProvider`
 *   ("openai" | "local") and `parseOnly` (skip-embedding flag passed through
 *   to writeToServerDocuments).
 * @returns {Promise<{success: boolean, reason: string|null, documents: Object[]}>}
 *   On failure, `reason` describes the error and `documents` is empty.
 */
async function asAudio({ fullFilePath = "", filename = "", options = {} }) {
  // Fall back to the local Whisper model when the requested provider key is
  // missing or unrecognized.
  const WhisperProvider = Object.hasOwn(
    WHISPER_PROVIDERS,
    options?.whisperProvider
  )
    ? WHISPER_PROVIDERS[options?.whisperProvider]
    : WHISPER_PROVIDERS.local;

  // Fix: `$(...)` is plain text inside a template literal, not interpolation —
  // these messages must use `${filename}` to show the file being processed.
  console.log(`-- Working ${filename} --`);
  const whisper = new WhisperProvider({ options });
  const { content, error } = await whisper.processFile(fullFilePath, filename);

  if (error) {
    console.error(`Error encountered for parsing of ${filename}.`);
    // Always discard the temp upload, even when transcription fails.
    trashFile(fullFilePath);
    return {
      success: false,
      reason: error,
      documents: [],
    };
  }

  if (!content?.length) {
    console.error(`Resulting text content was empty for ${filename}.`);
    trashFile(fullFilePath);
    return {
      success: false,
      reason: `No text content found in ${filename}.`,
      documents: [],
    };
  }

  const data = {
    id: v4(),
    url: "file://" + fullFilePath,
    title: filename,
    docAuthor: "no author found",
    description: "No description found.",
    // Fix: this converter handles audio uploads — the previous "pdf file"
    // source string was a copy-paste artifact from another converter.
    docSource: "audio file uploaded by the user.",
    chunkSource: "",
    published: createdDate(fullFilePath),
    wordCount: content.split(" ").length,
    pageContent: content,
    token_count_estimate: tokenizeString(content),
  };

  const document = writeToServerDocuments({
    data,
    filename: `${slugify(filename)}-${data.id}`,
    options: { parseOnly: options.parseOnly },
  });
  trashFile(fullFilePath);
  console.log(
    `[SUCCESS]: ${filename} transcribed, converted & ready for embedding.\n`
  );
  return { success: true, reason: null, documents: [document] };
}

module.exports = asAudio;