Plugin

5 stars

649 downloads

An example generator plugin for using models on a remote LM Studio instance.

src / generator.ts

import { type Chat, type GeneratorController, LMStudioClient } from "@lmstudio/sdk";
import { configSchematics, globalConfigSchematics } from "./config";

export async function generate(ctl: GeneratorController, chat: Chat) {
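  // Connection settings (base URL, optional client identifier and passkey) come from the plugin's global config.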
  const globalPluginConfig = ctl.getGlobalPluginConfig(globalConfigSchematics);
  const specifiedClientIdentifier = globalPluginConfig.get("clientIdentifier");
  const specifiedClientPasskey = globalPluginConfig.get("clientPasskey");
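  // Connect to the remote LM Studio instance; "await using" disposes the client when generate() returns.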
  await using remoteClient = new LMStudioClient({
    baseUrl: globalPluginConfig.get("baseUrl"),
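    // Treat empty strings as "not set" so the SDK falls back to its default identifier/passkey.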
    clientIdentifier: specifiedClientIdentifier === "" ? undefined : specifiedClientIdentifier,
    clientPasskey: specifiedClientPasskey === "" ? undefined : specifiedClientPasskey,
  });

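  // Per-chat plugin config selects which model to use on the remote instance and the sampling temperature.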
  const pluginConfig = ctl.getPluginConfig(configSchematics);
  const modelKey = pluginConfig.get("modelKey");
  const temperature = pluginConfig.get("temperature");

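  // Get a handle to the model on the remote instance (loading it if needed); the abort signal allows cancellation.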
  const model = await remoteClient.llm.model(modelKey, { signal: ctl.abortSignal });

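  // Stream the response, forwarding prediction fragments and tool-call events to the controller so the
  // local LM Studio instance can display them and execute any requested tool calls.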
  await model.respond(chat, {
    rawTools: {
      type: "toolArray",
      tools: ctl.getToolDefinitions(),
    },
    temperature,
    onPredictionFragment: fragment => ctl.fragmentGenerated(fragment.content, fragment),
    onToolCallRequestStart: () => ctl.toolCallGenerationStarted(),
    onToolCallRequestNameReceived: (_callId, name) => ctl.toolCallGenerationNameReceived(name),
    onToolCallRequestArgumentFragmentGenerated: (_callId, content) =>
      ctl.toolCallGenerationArgumentFragmentGenerated(content),
    onToolCallRequestEnd: (_callId, info) => ctl.toolCallGenerationEnded(info.toolCallRequest),
    onToolCallRequestFailure: (_callId, error) => ctl.toolCallGenerationFailed(error),
    signal: ctl.abortSignal,
  });
}
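
The generator imports its schematics from src / config.ts, which is not shown above. Below is a minimal sketch of what that file might contain, assuming the SDK's createConfigSchematics builder with "string" and "numeric" field types and displayName/hint options; the field keys match those read by generate(), while the display names, hints, and default values are illustrative rather than the plugin's actual settings.

src / config.ts

import { createConfigSchematics } from "@lmstudio/sdk";

// Per-chat settings, read in generator.ts via ctl.getPluginConfig(configSchematics).
export const configSchematics = createConfigSchematics()
  .field(
    "modelKey",
    "string",
    { displayName: "Model Key", hint: "Model to use on the remote LM Studio instance" },
    "", // illustrative default
  )
  .field(
    "temperature",
    "numeric",
    { displayName: "Temperature", hint: "Sampling temperature" },
    0.7, // illustrative default
  )
  .build();

// Global settings, read in generator.ts via ctl.getGlobalPluginConfig(globalConfigSchematics).
export const globalConfigSchematics = createConfigSchematics()
  .field(
    "baseUrl",
    "string",
    { displayName: "Base URL", hint: "Address of the remote LM Studio instance" },
    "", // illustrative default
  )
  .field(
    "clientIdentifier",
    "string",
    { displayName: "Client Identifier", hint: "Leave empty to use the SDK default" },
    "", // illustrative default
  )
  .field(
    "clientPasskey",
    "string",
    { displayName: "Client Passkey", hint: "Leave empty to use the SDK default" },
    "", // illustrative default
  )
  .build();

Both schematics would then be registered from the plugin's entry point (typically src / index.ts) alongside the generator itself, so LM Studio can render the fields in the plugin's settings UI.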