
Commit

Merge pull request elizaOS#1558 from Gajesh2007/main
feat: added new plugin - zktls - reclaim
fabianhug authored Jan 6, 2025
2 parents 645dc9e + 0167682 commit 5a46b30
Showing 11 changed files with 3,964 additions and 2,027 deletions.
agent/package.json (1 addition, 0 deletions)
@@ -58,6 +58,7 @@
"@elizaos/plugin-tee-marlin": "workspace:*",
"@elizaos/plugin-multiversx": "workspace:*",
"@elizaos/plugin-near": "workspace:*",
"@elizaos/plugin-reclaim": "workspace:*",
"@elizaos/plugin-zksync-era": "workspace:*",
"@elizaos/plugin-twitter": "workspace:*",
"@elizaos/plugin-cronoszkevm": "workspace:*",
agent/src/index.ts (18 additions, 0 deletions)
@@ -8,6 +8,7 @@ import { LensAgentClient } from "@elizaos/client-lens";
import { SlackClientInterface } from "@elizaos/client-slack";
import { TelegramClientInterface } from "@elizaos/client-telegram";
import { TwitterClientInterface } from "@elizaos/client-twitter";
+ import { ReclaimAdapter } from "@elizaos/plugin-reclaim";
import {
AgentRuntime,
CacheManager,
@@ -524,6 +525,22 @@ export async function createAgent(
);
}

+ // Initialize Reclaim adapter if environment variables are present
+ let verifiableInferenceAdapter;
+ if (
+     process.env.RECLAIM_APP_ID &&
+     process.env.RECLAIM_APP_SECRET &&
+     process.env.VERIFIABLE_INFERENCE_ENABLED === "true"
+ ) {
+     verifiableInferenceAdapter = new ReclaimAdapter({
+         appId: process.env.RECLAIM_APP_ID,
+         appSecret: process.env.RECLAIM_APP_SECRET,
+         modelProvider: character.modelProvider,
+         token,
+     });
+     elizaLogger.log("Verifiable inference adapter initialized");
+ }

return new AgentRuntime({
databaseAdapter: db,
token,
@@ -631,6 +648,7 @@ export async function createAgent(
managers: [],
cacheManager: cache,
fetch: logFetch,
+ verifiableInferenceAdapter,
});
}

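For reference, the adapter above is constructed only when RECLAIM_APP_ID, RECLAIM_APP_SECRET, and VERIFIABLE_INFERENCE_ENABLED="true" are all present in the environment. A minimal standalone sketch of the same wiring, assuming the ReclaimAdapter constructor accepts exactly the fields shown in the diff; the provider name and token source below are illustrative placeholders, not values from this commit:

    import { ReclaimAdapter } from "@elizaos/plugin-reclaim";

    // Placeholder credentials and provider settings: in createAgent these come
    // from the environment and from the character's model configuration.
    const adapter = new ReclaimAdapter({
        appId: process.env.RECLAIM_APP_ID,         // env var checked in the diff
        appSecret: process.env.RECLAIM_APP_SECRET, // env var checked in the diff
        modelProvider: "openai",                   // stand-in for character.modelProvider
        token: process.env.OPENAI_API_KEY,         // stand-in for the provider token
    });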
package.json (2 additions, 1 deletion)
@@ -47,7 +47,8 @@
},
"pnpm": {
"overrides": {
"onnxruntime-node": "1.20.1"
"onnxruntime-node": "1.20.1",
"viem": "2.21.58"
}
},
"engines": {
packages/core/src/generation.ts (72 additions, 18 deletions)
@@ -42,6 +42,10 @@ import {
ServiceType,
SearchResponse,
ActionResponse,
+ IVerifiableInferenceAdapter,
+ VerifiableInferenceOptions,
+ VerifiableInferenceResult,
+ VerifiableInferenceProvider,
TelemetrySettings,
TokenizerType,
} from "./types.ts";
@@ -181,6 +185,8 @@ export async function generateText({
maxSteps = 1,
stop,
customSystemPrompt,
+ verifiableInference = process.env.VERIFIABLE_INFERENCE_ENABLED === "true",
+ verifiableInferenceOptions,
}: {
runtime: IAgentRuntime;
context: string;
@@ -190,6 +196,9 @@
maxSteps?: number;
stop?: string[];
customSystemPrompt?: string;
+ verifiableInference?: boolean;
+ verifiableInferenceAdapter?: IVerifiableInferenceAdapter;
+ verifiableInferenceOptions?: VerifiableInferenceOptions;
}): Promise<string> {
if (!context) {
console.error("generateText context is empty");
@@ -201,8 +210,33 @@
elizaLogger.info("Generating text with options:", {
modelProvider: runtime.modelProvider,
model: modelClass,
+ verifiableInference,
});

+ // If verifiable inference is requested and adapter is provided, use it
+ if (verifiableInference && runtime.verifiableInferenceAdapter) {
+     try {
+         const result =
+             await runtime.verifiableInferenceAdapter.generateText(
+                 context,
+                 modelClass,
+                 verifiableInferenceOptions
+             );
+
+         // Verify the proof
+         const isValid =
+             await runtime.verifiableInferenceAdapter.verifyProof(result);
+         if (!isValid) {
+             throw new Error("Failed to verify inference proof");
+         }
+
+         return result.text;
+     } catch (error) {
+         elizaLogger.error("Error in verifiable inference:", error);
+         throw error;
+     }
+ }

const provider = runtime.modelProvider;
const endpoint =
runtime.character.modelEndpointOverride || getEndpoint(provider);
@@ -345,7 +379,7 @@
});

response = openaiResponse;
- elizaLogger.debug("Received response from OpenAI model.");
+ console.log("Received response from OpenAI model.");
break;
}

@@ -1520,6 +1554,9 @@ export interface GenerationOptions {
stop?: string[];
mode?: "auto" | "json" | "tool";
experimental_providerMetadata?: Record<string, unknown>;
+ verifiableInference?: boolean;
+ verifiableInferenceAdapter?: IVerifiableInferenceAdapter;
+ verifiableInferenceOptions?: VerifiableInferenceOptions;
}

/**
@@ -1551,6 +1588,9 @@ export const generateObject = async ({
schemaDescription,
stop,
mode = "json",
+ verifiableInference = false,
+ verifiableInferenceAdapter,
+ verifiableInferenceOptions,
}: GenerationOptions): Promise<GenerateObjectResult<unknown>> => {
if (!context) {
const errorMessage = "generateObject context is empty";
@@ -1594,6 +1634,9 @@
runtime,
context,
modelClass,
+ verifiableInference,
+ verifiableInferenceAdapter,
+ verifiableInferenceOptions,
});

return response;
@@ -1619,6 +1662,9 @@ interface ProviderOptions {
modelOptions: ModelSettings;
modelClass: ModelClass;
context: string;
+ verifiableInference?: boolean;
+ verifiableInferenceAdapter?: IVerifiableInferenceAdapter;
+ verifiableInferenceOptions?: VerifiableInferenceOptions;
}

/**
@@ -1630,7 +1676,15 @@ interface ProviderOptions {
export async function handleProvider(
options: ProviderOptions
): Promise<GenerateObjectResult<unknown>> {
- const { provider, runtime, context, modelClass } = options;
+ const {
+     provider,
+     runtime,
+     context,
+     modelClass,
+     verifiableInference,
+     verifiableInferenceAdapter,
+     verifiableInferenceOptions,
+ } = options;
switch (provider) {
case ModelProviderName.OPENAI:
case ModelProviderName.ETERNALAI:
@@ -1681,7 +1735,7 @@ async function handleOpenAI({
schema,
schemaName,
schemaDescription,
- mode,
+ mode = "json",
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
const baseURL = models.openai.endpoint || undefined;
@@ -1691,7 +1745,7 @@
schema,
schemaName,
schemaDescription,
- mode,
+ mode: "json",
...modelOptions,
});
}
@@ -1708,7 +1762,7 @@ async function handleAnthropic({
schema,
schemaName,
schemaDescription,
- mode,
+ mode = "json",
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
const anthropic = createAnthropic({ apiKey });
@@ -1717,7 +1771,7 @@
schema,
schemaName,
schemaDescription,
- mode,
+ mode: "json",
...modelOptions,
});
}
@@ -1734,7 +1788,7 @@ async function handleGrok({
schema,
schemaName,
schemaDescription,
- mode,
+ mode = "json",
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
const grok = createOpenAI({ apiKey, baseURL: models.grok.endpoint });
Expand All @@ -1743,7 +1797,7 @@ async function handleGrok({
schema,
schemaName,
schemaDescription,
- mode,
+ mode: "json",
...modelOptions,
});
}
Expand All @@ -1760,7 +1814,7 @@ async function handleGroq({
schema,
schemaName,
schemaDescription,
- mode,
+ mode = "json",
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
const groq = createGroq({ apiKey });
Expand All @@ -1769,7 +1823,7 @@ async function handleGroq({
schema,
schemaName,
schemaDescription,
- mode,
+ mode: "json",
...modelOptions,
});
}
Expand All @@ -1786,7 +1840,7 @@ async function handleGoogle({
schema,
schemaName,
schemaDescription,
- mode,
+ mode = "json",
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
const google = createGoogleGenerativeAI();
Expand All @@ -1795,7 +1849,7 @@ async function handleGoogle({
schema,
schemaName,
schemaDescription,
- mode,
+ mode: "json",
...modelOptions,
});
}
Expand All @@ -1812,7 +1866,7 @@ async function handleRedPill({
schema,
schemaName,
schemaDescription,
- mode,
+ mode = "json",
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
const redPill = createOpenAI({ apiKey, baseURL: models.redpill.endpoint });
Expand All @@ -1821,7 +1875,7 @@ async function handleRedPill({
schema,
schemaName,
schemaDescription,
- mode,
+ mode: "json",
...modelOptions,
});
}
Expand All @@ -1838,7 +1892,7 @@ async function handleOpenRouter({
schema,
schemaName,
schemaDescription,
- mode,
+ mode = "json",
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
const openRouter = createOpenAI({
Expand All @@ -1850,7 +1904,7 @@ async function handleOpenRouter({
schema,
schemaName,
schemaDescription,
- mode,
+ mode: "json",
...modelOptions,
});
}
Expand All @@ -1866,7 +1920,7 @@ async function handleOllama({
schema,
schemaName,
schemaDescription,
- mode,
+ mode = "json",
modelOptions,
provider,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
@@ -1879,7 +1933,7 @@
schema,
schemaName,
schemaDescription,
- mode,
+ mode: "json",
...modelOptions,
});
}
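Taken together, these changes make generateText treat the adapter as an opaque, verifiable provider: when enabled, it requests a completion from the adapter, verifies the returned proof, and only then returns the text; otherwise it falls through to the normal provider path. A rough sketch of the adapter surface implied by the call sites above; member and field names beyond the imported types are assumptions, not the actual definitions in types.ts:

    // Shape inferred from the runtime.verifiableInferenceAdapter calls in generateText.
    interface VerifiableInferenceAdapterSketch {
        // Produce a completion together with a proof that the inference ran.
        generateText(
            context: string,
            modelClass: string,
            options?: unknown
        ): Promise<{ text: string; proof: unknown }>;
        // Validate the proof; generateText throws when this returns false.
        verifyProof(result: { text: string; proof: unknown }): Promise<boolean>;
    }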
packages/core/src/runtime.ts (17 additions, 0 deletions)
@@ -44,6 +44,9 @@ import {
type Actor,
type Evaluator,
type Memory,
+ IVerifiableInferenceAdapter,
+ VerifiableInferenceOptions,
+ VerifiableInferenceProvider,
} from "./types.ts";
import { stringToUuid } from "./uuid.ts";

@@ -149,6 +152,8 @@ export class AgentRuntime implements IAgentRuntime {
cacheManager: ICacheManager;
clients: Record<string, any>;

+ verifiableInferenceAdapter?: IVerifiableInferenceAdapter;

registerMemoryManager(manager: IMemoryManager): void {
if (!manager.tableName) {
throw new Error("Memory manager must have a tableName");
@@ -230,6 +235,7 @@
speechModelPath?: string;
cacheManager: ICacheManager;
logging?: boolean;
+ verifiableInferenceAdapter?: IVerifiableInferenceAdapter;
}) {
elizaLogger.info("Initializing AgentRuntime with options:", {
character: opts.character?.name,
@@ -388,6 +394,8 @@
(opts.evaluators ?? []).forEach((evaluator: Evaluator) => {
this.registerEvaluator(evaluator);
});

+ this.verifiableInferenceAdapter = opts.verifiableInferenceAdapter;
}

async initialize() {
@@ -664,6 +672,7 @@
runtime: this,
context,
modelClass: ModelClass.SMALL,
+ verifiableInferenceAdapter: this.verifiableInferenceAdapter,
});

const evaluators = parseJsonArrayFromText(
@@ -1296,6 +1305,14 @@ Text: ${attachment.text}
attachments: formattedAttachments,
} as State;
}

+ getVerifiableInferenceAdapter(): IVerifiableInferenceAdapter | undefined {
+     return this.verifiableInferenceAdapter;
+ }
+
+ setVerifiableInferenceAdapter(adapter: IVerifiableInferenceAdapter): void {
+     this.verifiableInferenceAdapter = adapter;
+ }
}

const formatKnowledge = (knowledge: KnowledgeItem[]) => {
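The new getter and setter make the adapter swappable after the runtime is constructed. A minimal usage sketch, assuming runtime is an AgentRuntime instance and reclaimAdapter is any IVerifiableInferenceAdapter implementation (for example, the one wired up in agent/src/index.ts above):

    // Install an adapter only if none was supplied at construction time.
    if (!runtime.getVerifiableInferenceAdapter()) {
        runtime.setVerifiableInferenceAdapter(reclaimAdapter);
    }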
