diff --git a/package.json b/package.json
index 578775a71..da24da7fe 100644
--- a/package.json
+++ b/package.json
@@ -70,7 +70,6 @@
     "noflo-interaction": "^0.3.1",
     "noflo-nodejs": "^0.15.2",
     "noflo-objects": "^0.7.0",
-    "noflo-runtime-base": "^0.13.1",
     "noflo-strings": "^0.5.1",
     "reflect-metadata": "0.1.13",
     "registry-js": "1.15.1",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index ba6d6413f..f843dfabf 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -124,9 +124,6 @@ dependencies:
   noflo-objects:
     specifier: ^0.7.0
     version: 0.7.0
-  noflo-runtime-base:
-    specifier: ^0.13.1
-    version: 0.13.1
   noflo-strings:
     specifier: ^0.5.1
     version: 0.5.1
@@ -12155,7 +12152,7 @@
     dev: true
 
   /tr46@0.0.3:
-    resolution: {integrity: sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=}
+    resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==}
 
   /trim-repeated@1.0.0:
     resolution: {integrity: sha512-pkonvlKk8/ZuR0D5tLW8ljt5I8kmxp2XKymhepUeOdCEfKpZaktSArkLHZt76OB1ZvO9bssUsDty4SWhLvZpLg==}
@@ -12606,7 +12603,7 @@
     engines: {node: '>= 8'}
 
   /webidl-conversions@3.0.1:
-    resolution: {integrity: sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=}
+    resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==}
 
   /webidl-conversions@4.0.2:
     resolution: {integrity: sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==}
@@ -12818,7 +12815,7 @@
     dev: false
 
   /whatwg-url@5.0.0:
-    resolution: {integrity: sha1-lmRU6HZUYuN2RNNib2dCzotwll0=}
+    resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==}
     dependencies:
       tr46: 0.0.3
       webidl-conversions: 3.0.1
diff --git a/src/constants/channels.ts b/src/constants/channels.ts
index b0f5a17b2..7eb7e9e7e 100644
--- a/src/constants/channels.ts
+++ b/src/constants/channels.ts
@@ -69,9 +69,6 @@ export enum WikiChannel {
 export enum WikiGitWorkspaceChannel {
   name = 'WikiGitWorkspaceChannel',
 }
-export enum WorkflowChannel {
-  name = 'WorkflowChannel',
-}
 export enum WorkspaceChannel {
   focusWorkspace = 'focus-workspace',
   name = 'WorkspaceChannel',
diff --git a/src/services/workflow/NoFloIpcRuntime.ts b/src/services/workflow/NoFloIpcRuntime.ts
deleted file mode 100644
index 473ec98ca..000000000
--- a/src/services/workflow/NoFloIpcRuntime.ts
+++ /dev/null
@@ -1,76 +0,0 @@
-import Base from "noflo-runtime-base";
-
-class WebSocketRuntime extends Base {
-  constructor(options = {}) {
-    super(options);
-    this.connections = [];
-    if (options.catchExceptions) {
-      process.on('uncaughtException', (err) => {
-        this.connections.forEach((connection) => {
-          this.send('network', 'error', err, {
-            connection,
-          });
-          if (err.stack) {
-            console.error(err.stack);
-          } else {
-            console.error(`Error: ${err.toString()}`);
-          }
-        });
-      });
-    }
-
-    if (options.captureOutput) {
-      this.startCapture();
-    }
-  }
-
-  send(protocol, topic, payload, context) {
-    if (!context || !context.connection || !context.connection.connected) {
-      return;
-    }
-    let normalizedPayload = payload;
-    if (payload instanceof Error) {
-      normalizedPayload = normalizePayload(payload);
-    }
-    if (protocol === 'runtime' && topic === 'packet') {
-      // With exported port packets we need to go one deeper
-      normalizedPayload.payload = normalizePayload(normalizedPayload.payload);
-    }
-    debugSend(`${protocol}:${topic}`);
-    context.connection.sendUTF(JSON.stringify({
-      protocol,
-      command: topic,
-      payload: normalizedPayload,
-    }));
-    super.send(protocol, topic, payload, context);
-  }
-
-  sendAll(protocol, topic, payload) {
-    this.connections.forEach((connection) => {
-      this.send(protocol, topic, payload, {
-        connection,
-      });
-    });
-  }
-
-  startCapture() {
-    this.originalStdOut = process.stdout.write;
-    process.stdout.write = (string) => {
-      this.connections.forEach((connection) => {
-        this.send('network', 'output', {
-          message: string.replace(/\n$/, ''),
-          type: 'message',
-        }, {
-          connection,
-        });
-      });
-    };
-  }
-
-  stopCapture() {
-    if (!this.originalStdOut) {
-      return;
-    }
-    process.stdout.write = this.originalStdOut;
-  }
-}
\ No newline at end of file
diff --git a/src/services/workflow/index.ts b/src/services/workflow/index.ts
deleted file mode 100644
index d3d7c09cb..000000000
--- a/src/services/workflow/index.ts
+++ /dev/null
@@ -1,144 +0,0 @@
-import { dialog } from 'electron';
-import fs from 'fs-extra';
-import { injectable } from 'inversify';
-import path from 'path';
-import { Observable } from 'rxjs';
-import { ModuleThread, spawn, Worker } from 'threads';
-
-// @ts-expect-error it don't want .ts
-// eslint-disable-next-line import/no-webpack-loader-syntax
-import workerURL from 'threads-plugin/dist/loader?name=llmWorker!./llmWorker.ts';
-
-import { LANGUAGE_MODEL_FOLDER } from '@/constants/appPaths';
-import { getExistingParentDirectory } from '@/helpers/findPath';
-import { lazyInject } from '@services/container';
-import { i18n } from '@services/libs/i18n';
-import { logger } from '@services/libs/log';
-import type { INativeService } from '@services/native/interface';
-import { IPreferenceService } from '@services/preferences/interface';
-import serviceIdentifier from '@services/serviceIdentifier';
-import { IWindowService } from '@services/windows/interface';
-import { WindowNames } from '@services/windows/WindowProperties';
-import { ILanguageModelService, ILLMResultPart, ICreateRuntimeOptions } from './interface';
-import { LLMWorker } from './workflowWorker';
-
-@injectable()
-export class LanguageModel implements ILanguageModelService {
-  @lazyInject(serviceIdentifier.NativeService)
-  private readonly nativeService!: INativeService;
-
-  @lazyInject(serviceIdentifier.Window)
-  private readonly windowService!: IWindowService;
-
-  @lazyInject(serviceIdentifier.Preference)
-  private readonly preferenceService!: IPreferenceService;
-
-  private llmWorker?: ModuleThread<LLMWorker>;
-
-  private async initWorker(): Promise<void> {
-    // eslint-disable-next-line @typescript-eslint/no-unsafe-argument
-    this.llmWorker = await spawn<LLMWorker>(new Worker(workerURL), { timeout: 1000 * 60 });
-  }
-
-  /**
-   * Ensure you get a started worker. If not stated, it will await for it to start.
-   * @param workspaceID
-   */
-  private async getWorker(): Promise<ModuleThread<LLMWorker>> {
-    if (this.llmWorker === undefined) {
-      await this.initWorker();
-    } else {
-      return this.llmWorker;
-    }
-    if (this.llmWorker === undefined) {
-      const errorMessage = `Still no llmWorker after init. No running worker, maybe worker failed to start`;
-      logger.error(
-        errorMessage,
-        {
-          function: 'callWikiIpcServerRoute',
-        },
-      );
-      throw new Error(errorMessage);
-    }
-    return this.llmWorker;
-  }
-
-  /**
-   * Return true if the model exists, false otherwise. And will show a dialog to user if the model does not exist.
-   * @param modelPath Absolute path to the model
-   */
-  private async checkModelExistsAndWarnUser(modelPath: string): Promise<boolean> {
-    const exists = await fs.pathExists(modelPath);
-    if (!exists) {
-      const mainWindow = this.windowService.get(WindowNames.main);
-      if (mainWindow !== undefined) {
-        let pathToOpen = modelPath;
-        void dialog
-          .showMessageBox(mainWindow, {
-            title: i18n.t('LanguageModel.ModelNotExist'),
-            message: `${i18n.t('LanguageModel.ModelNotExistDescription')}: ${modelPath}`,
-            buttons: ['OK', i18n.t('LanguageModel.OpenThisPath')],
-            cancelId: 0,
-            defaultId: 1,
-          })
-          .then(async ({ response }) => {
-            if (response === 1) {
-              pathToOpen = await getExistingParentDirectory(modelPath);
-              await this.nativeService.openPath(pathToOpen);
-            }
-          })
-          // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
-          .catch((error) => logger.error('checkModelExistsAndWarnUser failed', { pathToOpen, error }));
-      }
-    }
-    return exists;
-  }
-
-  public runLLama$(options: ICreateRuntimeOptions): Observable<ILLMResultPart> {
-    const { id: conversationID, prompt, modelName } = options;
-    return new Observable<ILLMResultPart>((subscriber) => {
-      const getWikiChangeObserverIIFE = async () => {
-        const worker = await this.getWorker();
-
-        // const template = `Write a short helloworld in JavaScript.`;
-        // const prompt = `A chat between a user and a useful assistant.
-        // USER: ${template}
-        // ASSISTANT:`;
-        const { defaultModel } = await this.preferenceService.get('languageModel');
-        const modelPath = path.join(LANGUAGE_MODEL_FOLDER, modelName ?? defaultModel['llama.cpp']);
-        if (!(await this.checkModelExistsAndWarnUser(modelPath))) {
-          subscriber.error(new Error(`${i18n.t('LanguageModel.ModelNotExist')} ${modelPath}`));
-          return;
-        }
-        const observable = worker.runLLama$({ prompt, modelPath, conversationID });
-        observable.subscribe({
-          next: (result) => {
-            const loggerCommonMeta = { id: result.id, function: 'LanguageModel.runLLama$' };
-
-            if ('type' in result && result.type === 'result') {
-              const { token, id } = result;
-              // prevent the case that the result is from previous or next conversation, where its Observable is not properly closed.
-              if (id === conversationID) {
-                subscriber.next({ token, id });
-              }
-            } else if ('level' in result) {
-              if (result.level === 'error') {
-                logger.error(`${result.error.message} ${result.error.stack ?? 'no stack'}`, loggerCommonMeta);
-              } else {
-                logger.log(result.level, `${result.message}`, loggerCommonMeta);
-              }
-            }
-          },
-          error: (error) => {
-            subscriber.error(error);
-          },
-          complete: () => {
-            logger.info(`worker observable completed`, { function: 'LanguageModel.runLLama$' });
-            subscriber.complete();
-          },
-        });
-      };
-      void getWikiChangeObserverIIFE();
-    });
-  }
-}
diff --git a/src/services/workflow/interface.ts b/src/services/workflow/interface.ts
deleted file mode 100644
index 0417c6a71..000000000
--- a/src/services/workflow/interface.ts
+++ /dev/null
@@ -1,59 +0,0 @@
-import { WorkflowChannel } from '@/constants/channels';
-import { ProxyPropertyType } from 'electron-ipc-cat/common';
-import type { Observable } from 'rxjs';
-
-export interface ILLMResultBase {
-  /**
-   * Conversation id.
-   * Can use this to stop a generation.
-   * Also this worker is shared across all workspaces, so you can use this to identify which window is the result for.
-   */
-  id: string;
-}
-export type IWorkflowWorkerResponse = INormalWorkflowLogMessage | IErrorWorkflowLogMessage | IWorkflowWorkerResult;
-export interface INormalWorkflowLogMessage extends ILLMResultBase {
-  level: 'debug' | 'warn' | 'info';
-  message: string;
-  meta: unknown;
-}
-export interface IErrorWorkflowLogMessage extends ILLMResultBase {
-  error: Error;
-  level: 'error';
-}
-export interface IWorkflowWorkerResult extends ILLMResultPart {
-  type: 'result';
-}
-
-/**
- * Part of generate result.
- */
-export interface ILLMResultPart extends ILLMResultBase {
-  token: string;
-}
-
-export interface ICreateRuntimeOptions extends ILLMResultBase {
-  baseDir: string;
-  id: string;
-  label?: string;
-  namespace?: string;
-}
-
-/**
- * Test language model on renderer by:
- * ```js
- * window.observables.Workflow.runLLama$({ id: '1' }).subscribe({ next: console.log, error: console.error, complete: () => console.warn('completed') })
- * ```
- */
-
-/**
- * Run language model on a shared worker, and queue requests to the worker.
- */
-export interface IWorkflowService {
-  createRuntime$(options: ICreateRuntimeOptions): Observable<ILLMResultPart>;
-}
-export const WorkflowServiceIPCDescriptor = {
-  channel: WorkflowChannel.name,
-  properties: {
-    createRuntime$: ProxyPropertyType.Function$,
-  },
-};
diff --git a/src/services/workflow/workflowWorker.ts b/src/services/workflow/workflowWorker.ts
deleted file mode 100644
index 72c250add..000000000
--- a/src/services/workflow/workflowWorker.ts
+++ /dev/null
@@ -1,81 +0,0 @@
-/* eslint-disable @typescript-eslint/no-misused-promises */
-import 'source-map-support/register';
-import { LLM } from 'llama-node';
-import { LLamaCpp, LoadConfig as LLamaLoadConfig } from 'llama-node/dist/llm/llama-cpp.js';
-import { Observable } from 'rxjs';
-import { expose } from 'threads/worker';
-import { ILanguageModelWorkerResponse } from './interface';
-
-const DEFAULT_TIMEOUT_DURATION = 1000 * 30;
-function runLLama$(
-  options: { conversationID: string; modelPath: string; prompt: string },
-): Observable<ILanguageModelWorkerResponse> {
-  const { conversationID, modelPath, prompt } = options;
-  const loggerCommonMeta = { level: 'info' as const, meta: { function: 'llmWorker.runLLama$' }, id: conversationID };
-  return new Observable<ILanguageModelWorkerResponse>((subscriber) => {
-    void (async function runLLamaObservableIIFE() {
-      try {
-        subscriber.next({ message: 'preparing instance and config', ...loggerCommonMeta });
-        const llama = new LLM(LLamaCpp);
-        const config: LLamaLoadConfig = {
-          modelPath,
-          enableLogging: true,
-          nCtx: 1024,
-          seed: 0,
-          f16Kv: false,
-          logitsAll: false,
-          vocabOnly: false,
-          useMlock: false,
-          embedding: false,
-          useMmap: true,
-          nGpuLayers: 0,
-        };
-        subscriber.next({ message: 'loading config', ...loggerCommonMeta, meta: { config, ...loggerCommonMeta.meta } });
-        await llama.load(config);
-        let respondTimeout: NodeJS.Timeout | undefined;
-        const abortController = new AbortController();
-        const updateTimeout = () => {
-          clearTimeout(respondTimeout);
-          respondTimeout = setTimeout(() => {
-            abortController.abort();
-            subscriber.complete();
-          }, DEFAULT_TIMEOUT_DURATION);
-        };
-        updateTimeout();
-        subscriber.next({ message: 'ready to createCompletion', ...loggerCommonMeta });
-        await llama.createCompletion(
-          {
-            nThreads: 4,
-            nTokPredict: 2048,
-            topK: 40,
-            topP: 0.1,
-            temp: 0.2,
-            // repeatPenalty: 1,
-            prompt,
-          },
-          (response) => {
-            const { completed, token } = response;
-            updateTimeout();
-            subscriber.next({ type: 'result', token, id: conversationID });
-            if (completed) {
-              clearTimeout(respondTimeout);
-              subscriber.complete();
-            }
-          },
-          abortController.signal,
-        );
-        subscriber.next({ message: 'createCompletion completed', ...loggerCommonMeta });
-      } catch (error) {
-        if (error instanceof Error) {
-          subscriber.next({ level: 'error', error, id: conversationID });
-        } else {
-          subscriber.next({ level: 'error', error: new Error(String(error)), id: conversationID });
-        }
-      }
-    })();
-  });
-}
-
-const llmWorker = { runLLama$ };
-export type LLMWorker = typeof llmWorker;
-expose(llmWorker);