First of all, thank you
I depend on the llmfarm_core.swift package.
Code
// Minimal repro: load a GGUF model via llmfarm_core and run one prediction.
print("Hello.")

let input_text = "State the meaning of life."

// Validate the model path up front: llama_model_load reports only
// "failed to load model from <path>", so check existence/readability
// ourselves to separate "bad path / permissions" from "corrupt model".
let modelPath = "/Users/guyanhua/llama-2-7b-chat.Q3_K_S.gguf"
guard FileManager.default.isReadableFile(atPath: modelPath) else {
    fatalError("Model file is missing or not readable at: \(modelPath)")
}

var ai = AI(_modelPath: modelPath, _chatName: "chat")
let modelInference: ModelInference = .LLama_gguf

var params: ModelAndContextParams = .default
params.context = 4095        // context window; llama-2 models usually use 4096 — TODO confirm intended value
params.n_threads = 14
params.use_metal = false     // CPU-only inference; Metal acceleration disabled

do {
    try ai.loadModel(modelInference, contextParams: params)

    var output = ""
    try ExceptionCather.catchException {
        // Was `try!`, which crashes the process on a predict error before the
        // outer `catch` can run. Handle the Swift error explicitly instead;
        // catchException only traps Obj-C/C++ exceptions, not Swift throws.
        do {
            output = try ai.model.predict(input_text, mainCallback)
        } catch {
            print("predict failed: \(error)")
        }
    }
    print(output)
} catch {
    print(error)
}
Output
Hello.
AI init
llama_model_load: error loading model: llama_model_loader: failed to load model from /Users/guyanhua/llama-2-7b-chat.Q3_K_S.gguf
llama_load_model_from_file: failed to load model
modelLoadError
modelLoadError
And the model file does exist. How can I debug this? Can you provide some ideas, please?
First of all, thank you. I depend on the llmfarm_core.swift package. Code
Output
And the model file does exist. How can I debug this? Can you provide some ideas, please?
Thanks!