Inference Example
Example smart contract
Below is a smart contract that utilizes the IInference
interface to run inference on-chain.
import "valence-inference-lib/src/IInference.sol";
// Demonstrates on-chain ML inference via the Valence inference contract:
// a ZKML-secured numeric model run and a direct LLM call.
contract InferenceExample {
    // NOTE(review): assumed handle to the deployed Valence inference
    // contract — replace address(0) with the network-specific deployment
    // address. The original example used this name without declaring it.
    IInference INFERENCE_CONTRACT = IInference(address(0));

    // Result of the last ZKML model run (fixed-point pair: value, decimals).
    Number resultNumber;

    // Answer string from the last LLM run ("empty" for simulation results).
    string resultString;

    /// Execute an ML model from Valence's storage layer, secured by ZKML.
    /// Stores the model's first output value in `resultNumber`; when the
    /// call only produced a simulation result, Number(0, 0) is stored.
    function runZkmlModel() public {
        // One numeric input tensor, no string tensors.
        ModelInput memory modelInput = ModelInput(
            new NumberTensor[](1),
            new StringTensor[](0));

        // Fixed-point encoding: Number(value, decimals) => value * 10^-decimals.
        Number[] memory numbers = new Number[](2);
        numbers[0] = Number(7286679744720459, 17); // 0.07286679744720459
        numbers[1] = Number(4486280083656311, 16); // 0.4486280083656311
        modelInput.numbers[0] = NumberTensor("input", numbers);

        // Run the model (referenced by its content hash) in ZK mode.
        ModelOutput memory output = INFERENCE_CONTRACT.runModel(
            IInference.ModelInferenceMode.ZK,
            ModelInferenceRequest(
                "QmbbzDwqSxZSgkz1EbsNHp2mb67rYeUYHYWJ4wECE24S7A",
                modelInput
            ));

        if (output.is_simulation_result == false) {
            resultNumber = output.numbers[0].values[0];
        } else {
            resultNumber = Number(0, 0);
        }
    }

    /// Execute a Large Language Model directly in the smart contract.
    /// Stores the model's answer in `resultString`, or "empty" when only
    /// a simulation result was produced.
    function runLlm() public {
        string[] memory stopSequence = new string[](1);
        stopSequence[0] = "<end>";

        // NOTE(review): the numeric arguments (1000, 0) presumably are the
        // token limit and a seed/temperature value — confirm against the
        // LlmInferenceRequest struct definition in IInference.sol.
        LlmResponse memory llmResult = INFERENCE_CONTRACT.runLlm(
            IInference.LlmInferenceMode.VANILLA,
            LlmInferenceRequest(
                "meta-llama/Meta-Llama-3-8B-Instruct",
                "Hello sir, who are you?\n<start>",
                1000,
                stopSequence,
                0
            ));

        // BUG FIX: the original referenced the misspelled `llmResuklt`,
        // which does not compile.
        if (llmResult.is_simulation_result) {
            resultString = "empty";
        } else {
            resultString = llmResult.answer;
        }
    }
}
Last updated