ggerganov / llama.cpp

LLM inference in C/C++

Mac build with metal flag --main-gpu #8886

Closed. akaraon8bit closed this issue 1 month ago.

akaraon8bit commented 2 months ago

### What happened?

```shell
% sw_vers
ProductName:        macOS
ProductVersion:     14.6
BuildVersion:       23G80
10:43 akara@nneka /Users/akara/currentporoject/llama.cpp
% system_profiler SPDisplaysDataType
Graphics/Displays:

    Intel UHD Graphics 630:

      Chipset Model: Intel UHD Graphics 630
      Type: GPU
      Bus: Built-In
      VRAM (Dynamic, Max): 1536 MB
      Vendor: Intel
      Device ID: 0x3e9b
      Revision ID: 0x0002
      Automatic Graphics Switching: Supported
      gMux Version: 5.0.0
      Metal Support: Metal 3

    AMD Radeon Pro 5600M:

      Chipset Model: AMD Radeon Pro 5600M
      Type: GPU
      Bus: PCIe
      PCIe Lane Width: x16
      VRAM (Total): 8 GB
      Vendor: AMD (0x1002)
      Device ID: 0x7360
      Revision ID: 0x0041
      ROM Revision: 113-D3000E-192
      VBIOS Version: 113-D3000A0U-015
      Option ROM Version: 113-D3000A0U-015
      EFI Driver Version: 01.A1.192
      Automatic Graphics Switching: Supported
      gMux Version: 5.0.0
      Metal Support: Metal 3
```
I tried to pass `--main-gpu 1` to select the AMD Radeon Pro 5600M (8 GB), but llama.cpp fails to pick up and utilize that GPU and instead emits a repeating chunk of errors like:

```
ggml_metal_graph_compute: command buffer 0 failed with status 5
error: Ignored (for causing prior/excessive GPU errors) (00000004:kIOAccelCommandBufferCallbackErrorSubmissionsIgnored)
```
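For what it's worth, the log below shows `ggml_metal_init` finding both GPUs but picking the Intel UHD Graphics 630 as the default device, so the model never lands on the AMD card. As a sanity check outside llama.cpp, a minimal Objective-C sketch (hypothetical helper, not part of the repo) can enumerate the Metal devices macOS exposes and print their recommended working set sizes:

```objc
// list_metal_devices.m (hypothetical helper, not part of llama.cpp)
// Build: clang -framework Foundation -framework Metal list_metal_devices.m -o list_metal_devices
#import <Foundation/Foundation.h>
#import <Metal/Metal.h>

int main(void) {
    @autoreleasepool {
        // MTLCopyAllDevices() returns every Metal device on macOS,
        // unlike MTLCreateSystemDefaultDevice(), which returns a single default device.
        NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
        for (NSUInteger i = 0; i < devices.count; i++) {
            id<MTLDevice> dev = devices[i];
            NSLog(@"device %lu: %@ | lowPower=%d | recommendedMaxWorkingSetSize=%llu MiB",
                  (unsigned long)i, dev.name, dev.isLowPower,
                  dev.recommendedMaxWorkingSetSize / (1024 * 1024));
        }
    }
    return 0;
}
```

On this machine it should list both the AMD Radeon Pro 5600M (roughly 8 GB) and the Intel UHD Graphics 630 (roughly 1.5 GB), which matches the `recommendedMaxWorkingSetSize = 1610.61 MB` that the Metal backend reports once it ends up on the Intel GPU.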


### Name and Version

 ./llama-cli --version
version: 3506 (76614f35)
built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.6.0

### What operating system are you seeing the problem on?

_No response_

### Relevant log output

```shell
./llama-cli -m /Users/akara/currentporoject/huggingface_download/data/dolphin-2.9-llama3-8b.Q5_0.gguf -p 'write me a message for the woman i love '  -c 2048  --n-gpu-layers 999 --main-gpu 1
Log start
main: build = 3506 (76614f35)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.6.0
main: seed  = 1722949734
llama_model_loader: loaded meta data with 23 key-value pairs and 291 tensors from /Users/akara/currentporoject/huggingface_download/data/dolphin-2.9-llama3-8b.Q5_0.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = .
llama_model_loader: - kv   2:                           llama.vocab_size u32              = 128258
llama_model_loader: - kv   3:                       llama.context_length u32              = 8192
llama_model_loader: - kv   4:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv   5:                          llama.block_count u32              = 32
llama_model_loader: - kv   6:                  llama.feed_forward_length u32              = 14336
llama_model_loader: - kv   7:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   8:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv   9:              llama.attention.head_count_kv u32              = 8
llama_model_loader: - kv  10:     llama.attention.layer_norm_rms_epsilon f32              = 0.000010
llama_model_loader: - kv  11:                       llama.rope.freq_base f32              = 500000.000000
llama_model_loader: - kv  12:                          general.file_type u32              = 8
llama_model_loader: - kv  13:                       tokenizer.ggml.model str              = gpt2
llama_model_loader: - kv  14:                      tokenizer.ggml.tokens arr[str,128258]  = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv  15:                      tokenizer.ggml.scores arr[f32,128258]  = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  16:                  tokenizer.ggml.token_type arr[i32,128258]  = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv  17:                      tokenizer.ggml.merges arr[str,280147]  = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv  18:                tokenizer.ggml.bos_token_id u32              = 128000
llama_model_loader: - kv  19:                tokenizer.ggml.eos_token_id u32              = 128256
llama_model_loader: - kv  20:            tokenizer.ggml.padding_token_id u32              = 128001
llama_model_loader: - kv  21:                    tokenizer.chat_template str              = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv  22:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q5_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
llm_load_vocab: missing pre-tokenizer type, using: 'default'
llm_load_vocab:                                             
llm_load_vocab: ************************************        
llm_load_vocab: GENERATION QUALITY WILL BE DEGRADED!        
llm_load_vocab: CONSIDER REGENERATING THE MODEL             
llm_load_vocab: ************************************        
llm_load_vocab:                                             
llm_load_vocab: special tokens cache size = 258
llm_load_vocab: token to piece cache size = 0.8000 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = BPE
llm_load_print_meta: n_vocab          = 128258
llm_load_print_meta: n_merges         = 280147
llm_load_print_meta: vocab_only       = 0
llm_load_print_meta: n_ctx_train      = 8192
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 8
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_swa            = 0
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 4
llm_load_print_meta: n_embd_k_gqa     = 1024
llm_load_print_meta: n_embd_v_gqa     = 1024
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-05
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 14336
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 8192
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 8B
llm_load_print_meta: model ftype      = Q5_0
llm_load_print_meta: model params     = 8.03 B
llm_load_print_meta: model size       = 5.21 GiB (5.57 BPW) 
llm_load_print_meta: general.name     = .
llm_load_print_meta: BOS token        = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token        = 128256 '<|im_end|>'
llm_load_print_meta: PAD token        = 128001 '<|end_of_text|>'
llm_load_print_meta: LF token         = 128 'Ä'
llm_load_print_meta: EOT token        = 128256 '<|im_end|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: ggml ctx size =    0.27 MiB
ggml_backend_metal_log_allocated_size: allocated buffer, size =  2048.00 MiB, ( 2056.30 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size

ggml_backend_metal_log_allocated_size: allocated buffer, size =  2048.00 MiB, ( 4104.30 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size

ggml_backend_metal_log_allocated_size: allocated buffer, size =  1713.98 MiB, ( 5818.28 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size

ggml_backend_metal_log_allocated_size: allocated buffer, size =    76.97 MiB, ( 5895.25 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size
llm_load_tensors: offloading 32 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 33/33 layers to GPU
llm_load_tensors:      Metal buffer size =  4988.00 MiB
llm_load_tensors:        CPU buffer size =   344.44 MiB
........................................................................................
llama_new_context_with_model: n_ctx      = 2048
llama_new_context_with_model: n_batch    = 2048
llama_new_context_with_model: n_ubatch   = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base  = 500000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: AMD Radeon Pro 5600M
ggml_metal_init: found device: Intel(R) UHD Graphics 630
ggml_metal_init: picking default device: Intel(R) UHD Graphics 630
ggml_metal_init: using embedded metal library
ggml_metal_init: GPU name:   Intel(R) UHD Graphics 630
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3  (5001)
ggml_metal_init: simdgroup reduction support   = true
ggml_metal_init: simdgroup matrix mul. support = false
ggml_metal_init: hasUnifiedMemory              = true
ggml_metal_init: recommendedMaxWorkingSetSize  =  1610.61 MB
ggml_metal_init: skipping kernel_mul_mm_f32_f32                    (not supported)
ggml_metal_init: skipping kernel_mul_mm_f16_f32                    (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_1_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_1_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q8_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q2_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q3_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q6_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_xxs_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_xs_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq3_xxs_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq3_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq1_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq1_m_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq4_nl_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq4_xs_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_f32_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_f16_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_1_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_1_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q8_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q2_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q3_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q6_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_xxs_f32             (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_xs_f32              (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq3_xxs_f32             (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq3_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq1_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq1_m_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq4_nl_f32              (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq4_xs_f32              (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h64            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h80            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h96            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h112           (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h128           (not supported)
llama_kv_cache_init:      Metal KV buffer size =   256.00 MiB
llama_new_context_with_model: KV self size  =  256.00 MiB, K (f16):  128.00 MiB, V (f16):  128.00 MiB
llama_new_context_with_model:        CPU  output buffer size =     0.49 MiB
llama_new_context_with_model:      Metal compute buffer size =   258.50 MiB
llama_new_context_with_model:        CPU compute buffer size =    12.01 MiB
llama_new_context_with_model: graph nodes  = 1030
llama_new_context_with_model: graph splits = 2
ggml_metal_graph_compute: command buffer 0 failed with status 5
error: Caused GPU Timeout Error (00000002:kIOAccelCommandBufferCallbackErrorTimeout)

system_info: n_threads = 8 / 16 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | 
sampling: 
    repeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000
    top_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800
    mirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000
sampling order: 
CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature 
generate: n_ctx = 2048, n_batch = 2048, n_predict = -1, n_keep = 0

write me a message for the woman i love ggml_metal_graph_compute: command buffer 0 failed with status 5
error: Caused GPU Timeout Error (00000002:kIOAccelCommandBufferCallbackErrorTimeout)
ggml_metal_graph_compute: command buffer 0 failed with status 5
error: Ignored (for causing prior/excessive GPU errors) (00000004:kIOAccelCommandBufferCallbackErrorSubmissionsIgnored)
[... the same two lines repeat for every generated token; truncated ...]

llama_print_timings:        load time =   11459.01 ms
llama_print_timings:      sample time =     244.84 ms /   130 runs   (    1.88 ms per token,   530.97 tokens per second)
llama_print_timings: prompt eval time =    6479.79 ms /    10 tokens (  647.98 ms per token,     1.54 tokens per second)
llama_print_timings:        eval time =    3062.70 ms /   129 runs   (   23.74 ms per token,    42.12 tokens per second)
llama_print_timings:       total time =   10253.25 ms /   139 tokens
9:09 akara@nneka /Users/akara/currentporoject/llama.cpp
% ./llama-cli -m /Users/akara/currentporoject/huggingface_download/data/dolphin-2.9-llama3-8b.Q5_0.gguf -p 'write me a message for the woman i love '  -c 2048  --n-gpu-layers 999 --main-gpu 1
warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the main GPU has no effect.
Log start
main: build = 3506 (76614f35)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.6.0
main: seed  = 1722950709
[... model load, ggml_metal_init (again finding both GPUs and picking Intel(R) UHD Graphics 630 as the default device), system_info, and sampling output identical to the first run, including the same GPU Timeout error ...]

write me a message for the woman i love ggml_metal_graph_compute: command buffer 0 failed with status 5
error: Caused GPU Timeout Error (00000002:kIOAccelCommandBufferCallbackErrorTimeout)
9ggml_metal_graph_compute: command buffer 0 failed with status 5
error: Ignored (for causing prior/excessive GPU errors) 

llama_print_timings:        load time =    7502.90 ms
llama_print_timings:      sample time =     230.13 ms /   635 runs   (    0.36 ms per token,  2759.33 tokens per second)
llama_print_timings: prompt eval time =    6321.67 ms /    10 tokens (  632.17 ms per token,     1.58 tokens per second)
llama_print_timings:        eval time =    3354.89 ms /   634 runs   (    5.29 ms per token,   188.98 tokens per second)
llama_print_timings:       total time =   10368.51 ms /   644 tokens
9:25 akara@nneka /Users/akara/currentporoject/llama.cpp
% ./llama-cli -m /Users/akara/currentporoject/huggingface_download/data/dolphin-2.9-llama3-8b.Q5_0.gguf -p 'write me a message for the woman i love '  -c 2048  --n-gpu-layers 999 --main-gpu 1
warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the main GPU has no effect.
Log start
main: build = 3506 (76614f35)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.6.0
main: seed  = 1722950851
llama_model_loader: loaded meta data with 23 key-value pairs and 291 tensors from /Users/akara/currentporoject/huggingface_download/data/dolphin-2.9-llama3-8b.Q5_0.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = .
llama_model_loader: - kv   2:                           llama.vocab_size u32              = 128258
llama_model_loader: - kv   3:                       llama.context_length u32              = 8192
llama_model_loader: - kv   4:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv   5:                          llama.block_count u32              = 32
llama_model_loader: - kv   6:                  llama.feed_forward_length u32              = 14336
llama_model_loader: - kv   7:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   8:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv   9:              llama.attention.head_count_kv u32              = 8
llama_model_loader: - kv  10:     llama.attention.layer_norm_rms_epsilon f32              = 0.000010
llama_model_loader: - kv  11:                       llama.rope.freq_base f32              = 500000.000000
llama_model_loader: - kv  12:                          general.file_type u32              = 8
llama_model_loader: - kv  13:                       tokenizer.ggml.model str              = gpt2
llama_model_loader: - kv  14:                      tokenizer.ggml.tokens arr[str,128258]  = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv  15:                      tokenizer.ggml.scores arr[f32,128258]  = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  16:                  tokenizer.ggml.token_type arr[i32,128258]  = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv  17:                      tokenizer.ggml.merges arr[str,280147]  = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv  18:                tokenizer.ggml.bos_token_id u32              = 128000
llama_model_loader: - kv  19:                tokenizer.ggml.eos_token_id u32              = 128256
llama_model_loader: - kv  20:            tokenizer.ggml.padding_token_id u32              = 128001
llama_model_loader: - kv  21:                    tokenizer.chat_template str              = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv  22:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q5_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
llm_load_vocab: missing pre-tokenizer type, using: 'default'
llm_load_vocab:                                             
llm_load_vocab: ************************************        
llm_load_vocab: GENERATION QUALITY WILL BE DEGRADED!        
llm_load_vocab: CONSIDER REGENERATING THE MODEL             
llm_load_vocab: ************************************        
llm_load_vocab:                                             
llm_load_vocab: special tokens cache size = 258
llm_load_vocab: token to piece cache size = 0.8000 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = BPE
llm_load_print_meta: n_vocab          = 128258
llm_load_print_meta: n_merges         = 280147
llm_load_print_meta: vocab_only       = 0
llm_load_print_meta: n_ctx_train      = 8192
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 8
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_swa            = 0
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 4
llm_load_print_meta: n_embd_k_gqa     = 1024
llm_load_print_meta: n_embd_v_gqa     = 1024
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-05
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 14336
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 8192
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 8B
llm_load_print_meta: model ftype      = Q5_0
llm_load_print_meta: model params     = 8.03 B
llm_load_print_meta: model size       = 5.21 GiB (5.57 BPW) 
llm_load_print_meta: general.name     = .
llm_load_print_meta: BOS token        = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token        = 128256 '<|im_end|>'
llm_load_print_meta: PAD token        = 128001 '<|end_of_text|>'
llm_load_print_meta: LF token         = 128 'Ä'
llm_load_print_meta: EOT token        = 128256 '<|im_end|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: ggml ctx size =    0.27 MiB
ggml_backend_metal_log_allocated_size: allocated buffer, size =  2048.00 MiB, ( 2056.30 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size

ggml_backend_metal_log_allocated_size: allocated buffer, size =  2048.00 MiB, ( 4104.30 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size

ggml_backend_metal_log_allocated_size: allocated buffer, size =  1713.98 MiB, ( 5818.28 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size

ggml_backend_metal_log_allocated_size: allocated buffer, size =    76.97 MiB, ( 5895.25 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size
llm_load_tensors: offloading 32 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 33/33 layers to GPU
llm_load_tensors:      Metal buffer size =  4988.00 MiB
llm_load_tensors:        CPU buffer size =   344.44 MiB
........................................................................................
llama_new_context_with_model: n_ctx      = 2048
llama_new_context_with_model: n_batch    = 2048
llama_new_context_with_model: n_ubatch   = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base  = 500000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: AMD Radeon Pro 5600M
ggml_metal_init: found device: Intel(R) UHD Graphics 630
ggml_metal_init: picking default device: Intel(R) UHD Graphics 630
ggml_metal_init: using embedded metal library
ggml_metal_init: GPU name:   Intel(R) UHD Graphics 630
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3  (5001)
ggml_metal_init: simdgroup reduction support   = true
ggml_metal_init: simdgroup matrix mul. support = false
ggml_metal_init: hasUnifiedMemory              = true
ggml_metal_init: recommendedMaxWorkingSetSize  =  1610.61 MB

write me a message for the woman i love ggml_metal_graph_compute: command buffer 0 failed with status 5
error: Caused GPU Timeout Error (00000002:kIOAccelCommandBufferCallbackErrorTimeout)
;ggml_metal_graph_compute: command buffer 0 failed with status 5

error: Ignored (for causing prior/excessive GPU errors) (00000004:kIOAccelCommandBufferCallbackErrorSubmissionsIgnored)
Hggml_metal_graph_compute: command buffer 0 failed with status 5
error: Ignored (for causing prior/excessive GPU errors) (00000004:kIOAccelCommandBufferCallbackErrorSubmissionsIgnored)

llama_print_timings:        load time =    8311.71 ms
llama_print_timings:      sample time =     161.66 ms /   233 runs   (    0.69 ms per token,  1441.25 tokens per second)
llama_print_timings: prompt eval time =    6298.39 ms /    10 tokens (  629.84 ms per token,     1.59 tokens per second)
llama_print_timings:        eval time =    2215.04 ms /   233 runs   (    9.51 ms per token,   105.19 tokens per second)
llama_print_timings:       total time =    8977.78 ms /   243 tokens
```
ngxson commented 2 months ago

I don't think AMD GPUs are supported on Apple Metal. See: https://github.com/ollama/ollama/issues/1016

akaraon8bit commented 2 months ago

ollama/ollama#1016

Well, Apple does support Metal on AMD GPUs. The current llama.cpp simply picks the default GPU via `id<MTLDevice> device = MTLCreateSystemDefaultDevice();` in `llama.cpp/ggml/src/ggml-metal.m`. The workaround for the `--main-gpu` index is to enumerate devices with `NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();` and then index `devices[i]`. I am currently working on a build that changes the API to `GGML_API ggml_backend_t ggml_backend_metal_init(int deviceIndex);`, calls it as `ctx->backend_metal = ggml_backend_metal_init(model->main_gpu);`, and extends the internal init to `ggml_metal_init(int deviceIndex, int n_cb)`.

akaraon8bit commented 2 months ago

I came up with this implementation to select a GPU by index. My current model spits out rubbish with it, but I hope it will be useful to someone, or can be refined for a major release. Add `#import <sys/sysctl.h>`, change all the relevant function call signatures to support the new `ggml_backend_metal_init(model->main_gpu)`, and you can then index any Metal-supported GPU (a sketch of the call-site plumbing follows the snippet below).

static struct ggml_metal_context * ggml_metal_init(int deviceIndex, int n_cb) {
    GGML_METAL_LOG_INFO("%s: allocating\n", __func__);

#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
    // Show all the Metal device instances in the system
    NSArray * devices = MTLCopyAllDevices();
    for (id<MTLDevice> device in devices) {
        GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
    }
    [devices release]; // since it was created by a *Copy* method
#endif

    // Check for Apple Silicon (M1, M2, etc.) vs. Intel
    size_t size_arm = 0;
    NSMutableString * logMessages = [NSMutableString string];
    if (sysctlbyname("hw.optional.arm64", NULL, &size_arm, NULL, 0) == 0 && size_arm == sizeof(int)) {
        int isAppleSilicon = 0;
        sysctlbyname("hw.optional.arm64", &isAppleSilicon, &size_arm, NULL, 0);
        if (isAppleSilicon) {
            [logMessages appendString:@"This Mac is running on an Apple Silicon (M) Series processor."];
        } else {
            [logMessages appendString:@"This Mac is running on an Intel processor."];
        }
    } else {
        // the sysctl key is missing on Intel Macs
        [logMessages appendString:@"This Mac is running on an Intel processor."];
    }
    GGML_METAL_LOG_INFO("%s'%s'\n", __func__, [logMessages UTF8String]);

    // Pick and show the default Metal device
    id<MTLDevice> device = MTLCreateSystemDefaultDevice();
    NSString * defaultDeviceName = device.name;
    GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);

    GGML_METAL_LOG_INFO("%s: Passed GPU at index %d:\n", __func__, deviceIndex);

    // If the requested index is valid, override the default device with that entry
    NSArray<id<MTLDevice>> * alldevices = MTLCopyAllDevices();
    if (deviceIndex >= 0 && (NSUInteger) deviceIndex < alldevices.count) {
        id<MTLDevice> selectgpu = alldevices[deviceIndex];
        NSString * deviceName = selectgpu.name;
        if (![defaultDeviceName isEqualToString:deviceName]) {
            [device release];            // drop the default device created above
            device = [selectgpu retain]; // take ownership of the selected device
            GGML_METAL_LOG_INFO("%s: Picking Index GPU Name: %s\n", __func__, [deviceName UTF8String]);
        }
    }
    [alldevices release]; // release the *Copy*-created array on every path

    // ... the rest of the original ggml_metal_init continues here, using `device` ...
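
For completeness, here is a rough sketch of the plumbing outside `ggml_metal_init` that the change above implies. The exact call site differs between llama.cpp versions, so treat these lines as illustrative rather than a drop-in patch:

```objc
// ggml-metal.h — the Metal backend init now takes the device index
GGML_API ggml_backend_t ggml_backend_metal_init(int deviceIndex);

// llama.cpp — forward the --main-gpu value when the Metal backend is created;
// the surrounding function and field names depend on the llama.cpp version
ctx->backend_metal = ggml_backend_metal_init(model->main_gpu);
```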
akaraon8bit commented 2 months ago
 ./llama-cli -m /Users/akara/currentporoject/huggingface_download/data/dolphin-2.9-llama3-8b.Q4_K_M.gguf -p 'write me a message for the woman i love '  --main-gpu 0 --color --threads 4 --keep -1  --interactive --simple-io -e --multiline-input --no-display-prompt --conversation 
warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the main GPU has no effect.
Log start
main: build = 3506 (76614f35)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.6.0
main: seed  = 1723137629
llama_model_loader: loaded meta data with 23 key-value pairs and 291 tensors from /Users/akara/currentporoject/huggingface_download/data/dolphin-2.9-llama3-8b.Q4_K_M.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = .
llama_model_loader: - kv   2:                           llama.vocab_size u32              = 128258
llama_model_loader: - kv   3:                       llama.context_length u32              = 8192
llama_model_loader: - kv   4:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv   5:                          llama.block_count u32              = 32
llama_model_loader: - kv   6:                  llama.feed_forward_length u32              = 14336
llama_model_loader: - kv   7:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   8:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv   9:              llama.attention.head_count_kv u32              = 8
llama_model_loader: - kv  10:     llama.attention.layer_norm_rms_epsilon f32              = 0.000010
llama_model_loader: - kv  11:                       llama.rope.freq_base f32              = 500000.000000
llama_model_loader: - kv  12:                          general.file_type u32              = 15
llama_model_loader: - kv  13:                       tokenizer.ggml.model str              = gpt2
llama_model_loader: - kv  14:                      tokenizer.ggml.tokens arr[str,128258]  = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv  15:                      tokenizer.ggml.scores arr[f32,128258]  = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  16:                  tokenizer.ggml.token_type arr[i32,128258]  = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv  17:                      tokenizer.ggml.merges arr[str,280147]  = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv  18:                tokenizer.ggml.bos_token_id u32              = 128000
llama_model_loader: - kv  19:                tokenizer.ggml.eos_token_id u32              = 128256
llama_model_loader: - kv  20:            tokenizer.ggml.padding_token_id u32              = 128001
llama_model_loader: - kv  21:                    tokenizer.chat_template str              = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv  22:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q4_K:  193 tensors
llama_model_loader: - type q6_K:   33 tensors
llm_load_vocab: missing pre-tokenizer type, using: 'default'
llm_load_vocab:                                             
llm_load_vocab: ************************************        
llm_load_vocab: GENERATION QUALITY WILL BE DEGRADED!        
llm_load_vocab: CONSIDER REGENERATING THE MODEL             
llm_load_vocab: ************************************        
llm_load_vocab:                                             
llm_load_vocab: special tokens cache size = 258
llm_load_vocab: token to piece cache size = 0.8000 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = BPE
llm_load_print_meta: n_vocab          = 128258
llm_load_print_meta: n_merges         = 280147
llm_load_print_meta: vocab_only       = 0
llm_load_print_meta: n_ctx_train      = 8192
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 8
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_swa            = 0
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 4
llm_load_print_meta: n_embd_k_gqa     = 1024
llm_load_print_meta: n_embd_v_gqa     = 1024
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-05
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 14336
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 8192
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 8B
llm_load_print_meta: model ftype      = Q4_K - Medium
llm_load_print_meta: model params     = 8.03 B
llm_load_print_meta: model size       = 4.58 GiB (4.89 BPW) 
llm_load_print_meta: general.name     = .
llm_load_print_meta: BOS token        = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token        = 128256 '<|im_end|>'
llm_load_print_meta: PAD token        = 128001 '<|end_of_text|>'
llm_load_print_meta: LF token         = 128 'Ä'
llm_load_print_meta: EOT token        = 128256 '<|im_end|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: ggml ctx size =    0.27 MiB
ggml_backend_metal_log_allocated_size: allocated buffer, size =  2048.00 MiB, ( 2056.30 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size

ggml_backend_metal_log_allocated_size: allocated buffer, size =  2048.00 MiB, ( 4104.30 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size

ggml_backend_metal_log_allocated_size: allocated buffer, size =  1129.48 MiB, ( 5233.78 /  1536.00)ggml_backend_metal_log_allocated_size: warning: current allocated size is greater than the recommended max working set size
llm_load_tensors: offloading 32 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 33/33 layers to GPU
llm_load_tensors:        CPU buffer size =   281.82 MiB
llm_load_tensors:      Metal buffer size =  4403.50 MiB
........................................................................................
llama_new_context_with_model: n_ctx      = 8192
llama_new_context_with_model: n_batch    = 2048
llama_new_context_with_model: n_ubatch   = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base  = 500000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: AMD Radeon Pro 5600M
ggml_metal_init: found device: Intel(R) UHD Graphics 630
ggml_metal_init'This Mac is running on an Intel processor.'
ggml_metal_init: picking default device: Intel(R) UHD Graphics 630
ggml_metal_init: Passed GPU at index 0:
ggml_metal_init: Picking Index GPU Name: AMD Radeon Pro 5600M
ggml_metal_init: using embedded metal library
ggml_metal_init: GPU name:   AMD Radeon Pro 5600M
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3  (5001)
ggml_metal_init: simdgroup reduction support   = true
ggml_metal_init: simdgroup matrix mul. support = false
ggml_metal_init: hasUnifiedMemory              = false
ggml_metal_init: recommendedMaxWorkingSetSize  =  8573.16 MB
ggml_metal_init: skipping kernel_mul_mm_f32_f32                    (not supported)
ggml_metal_init: skipping kernel_mul_mm_f16_f32                    (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_1_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_1_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q8_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q2_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q3_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q6_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_xxs_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_xs_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq3_xxs_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq3_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq1_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq1_m_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq4_nl_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq4_xs_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_f32_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_f16_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_1_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_1_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q8_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q2_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q3_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q6_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_xxs_f32             (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_xs_f32              (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq3_xxs_f32             (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq3_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq1_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq1_m_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq4_nl_f32              (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq4_xs_f32              (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h64            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h80            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h96            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h112           (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h128           (not supported)
llama_kv_cache_init:      Metal KV buffer size =  1024.00 MiB
llama_new_context_with_model: KV self size  = 1024.00 MiB, K (f16):  512.00 MiB, V (f16):  512.00 MiB
llama_new_context_with_model:        CPU  output buffer size =     0.49 MiB
llama_new_context_with_model:      Metal compute buffer size =   560.00 MiB
llama_new_context_with_model:        CPU compute buffer size =    24.01 MiB
llama_new_context_with_model: graph nodes  = 1030
llama_new_context_with_model: graph splits = 2
main: chat template example: <|start_header_id|>system<|end_header_id|>

You are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>

Hello<|eot_id|><|start_header_id|>assistant<|end_header_id|>

Hi there<|eot_id|><|start_header_id|>user<|end_header_id|>

How are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>

system_info: n_threads = 4 / 16 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | 
main: interactive mode on.
sampling: 
    repeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000
    top_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800
    mirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000
sampling order: 
CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature 
generate: n_ctx = 8192, n_batch = 2048, n_predict = -1, n_keep = 15

== Running in interactive mode. ==
 - Press Ctrl+C to interject at any time.
 - To return control to the AI, end your input with '\'.
 - To return control without starting a new line, end your input with '/'.

> tell me a love story \
+!2*:%^C
> ^C

llama_print_timings:        load time =    9950.98 ms
llama_print_timings:      sample time =       3.15 ms /     6 runs   (    0.53 ms per token,  1904.76 tokens per second)
llama_print_timings: prompt eval time =   36907.86 ms /    30 tokens ( 1230.26 ms per token,     0.81 tokens per second)
llama_print_timings:        eval time =    4881.06 ms /     5 runs   (  976.21 ms per token,     1.02 tokens per second)
llama_print_timings:       total time =   45500.96 ms /    35 tokens
dbl001 commented 2 months ago

On my iMac 27" running macOS 14.6 with an AMD Radeon Pro 5700 XT, MTLCreateSystemDefaultDevice() returns nil. Any idea why?

The AMD GPU works fine with PyTorch MPS and TensorFlow Metal.

   id<MTLDevice> device = MTLCreateSystemDefaultDevice();
    GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);
...
DEBUG: Entering ggml_backend_metal_buffer_from_ptr with size = 4357873664
Error: MTLCreateSystemDefaultDevice() returned nil
Metal framework is available
Available Metal devices:
  AMD Radeon Pro 5700 XT
macOS Version: Version 14.6 (Build 23G80)
DEBUG: Metal device: (null), maxBufferLength: 0
DEBUG: size_aligned = 4357877760, device.maxBufferLength = 0
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer

Here's my debugging code:

// default buffer
static id<MTLDevice> g_backend_device = nil;
static int g_backend_device_ref_count = 0;

static id<MTLDevice> ggml_backend_metal_get_device(void) {
    if (g_backend_device == nil) {
        g_backend_device = MTLCreateSystemDefaultDevice();
        if (g_backend_device == nil) {
            fprintf(stderr, "Error: MTLCreateSystemDefaultDevice() returned nil\n");

            // Check if Metal is supported
            if (@available(macOS 10.11, *)) {
                fprintf(stderr, "Metal framework is available\n");
            } else {
                fprintf(stderr, "Metal framework is not available on this system\n");
            }

            // List available devices
            NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
            fprintf(stderr, "Available Metal devices:\n");
            for (id<MTLDevice> device in devices) {
                fprintf(stderr, "  %s\n", device.name.UTF8String);
            }

            // Additional system info
            fprintf(stderr, "macOS Version: %s\n", [[[NSProcessInfo processInfo] operatingSystemVersionString] UTF8String]);
        } else {
            fprintf(stderr, "Successfully created Metal device: %s\n", g_backend_device.name.UTF8String);
        }
    }
    g_backend_device_ref_count++;
    return g_backend_device;
}

static void ggml_backend_metal_free_device(void) {
    assert(g_backend_device_ref_count > 0);

    g_backend_device_ref_count--;

    if (g_backend_device_ref_count == 0) {
        [g_backend_device release];
        g_backend_device = nil;
    }
}

Here are my system parameters:

 % system_profiler SPDisplaysDataType
Graphics/Displays:

    AMD Radeon Pro 5700 XT:

      Chipset Model: AMD Radeon Pro 5700 XT
      Type: GPU
      Bus: PCIe
      PCIe Lane Width: x16
      VRAM (Total): 16 GB
      Vendor: AMD (0x1002)
      Device ID: 0x7319
      Revision ID: 0x0040
      ROM Revision: 113-D1820Q-231
      VBIOS Version: 113-D182A2XT-013
      Option ROM Version: 113-D182A2XT-013
      EFI Driver Version: 01.01.231
      Metal Support: Metal 3
      Displays:
        iMac:
          Display Type: Built-In Retina LCD
          Resolution: Retina 5K (5120 x 2880)
          Framebuffer Depth: 30-Bit Color (ARGB2101010)
          Main Display: Yes
          Mirror: Off
          Online: Yes
          Automatically Adjust Brightness: Yes
          Connection Type: Internal

(ai) davidlaxer@BlueDiamond-2 clamav % sw_vers    
ProductName:        macOS
ProductVersion:     14.6
BuildVersion:       23G80

Output from trying to run llama.cpp:

 % ./llama-cli -m ./models/7B/ggml-model-q4_0-v2.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt

Log start
main: build = 3152 (7b2f4a7d)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.5.0
main: seed  = 1723228333
llama_model_loader: loaded meta data with 16 key-value pairs and 291 tensors from ./models/7B/ggml-model-q4_0-v2.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = models
llama_model_loader: - kv   2:                       llama.context_length u32              = 2048
llama_model_loader: - kv   3:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv   4:                          llama.block_count u32              = 32
llama_model_loader: - kv   5:                  llama.feed_forward_length u32              = 11008
llama_model_loader: - kv   6:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   7:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv   8:              llama.attention.head_count_kv u32              = 32
llama_model_loader: - kv   9:     llama.attention.layer_norm_rms_epsilon f32              = 0.000001
llama_model_loader: - kv  10:                          general.file_type u32              = 2
llama_model_loader: - kv  11:                       tokenizer.ggml.model str              = llama
llama_model_loader: - kv  12:                      tokenizer.ggml.tokens arr[str,32000]   = ["<unk>", "<s>", "</s>", "<0x00>", "<...
llama_model_loader: - kv  13:                      tokenizer.ggml.scores arr[f32,32000]   = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  14:                  tokenizer.ggml.token_type arr[i32,32000]   = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
llama_model_loader: - kv  15:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q4_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
llm_load_vocab: special tokens cache size = 259
llm_load_vocab: token to piece cache size = 0.1684 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = SPM
llm_load_print_meta: n_vocab          = 32000
llm_load_print_meta: n_merges         = 0
llm_load_print_meta: n_ctx_train      = 2048
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 32
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 1
llm_load_print_meta: n_embd_k_gqa     = 4096
llm_load_print_meta: n_embd_v_gqa     = 4096
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-06
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 11008
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 2048
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 7B
llm_load_print_meta: model ftype      = Q4_0
llm_load_print_meta: model params     = 6.74 B
llm_load_print_meta: model size       = 3.56 GiB (4.54 BPW) 
llm_load_print_meta: general.name     = models
llm_load_print_meta: BOS token        = 1 '<s>'
llm_load_print_meta: EOS token        = 2 '</s>'
llm_load_print_meta: UNK token        = 0 '<unk>'
llm_load_print_meta: LF token         = 13 '<0x0A>'
llm_load_tensors: ggml ctx size =    0.30 MiB
max_size: 107520000
Error: MTLCreateSystemDefaultDevice() returned nil
Metal framework is available
Available Metal devices:
  AMD Radeon Pro 5700 XT
macOS Version: Version 14.6 (Build 23G80)
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer
llama_load_model_from_file: failed to load model
llama_init_from_gpt_params: error: failed to load model './models/7B/ggml-model-q4_0-v2.gguf'
main: error: unable to load model
(AI-Feynman) davidlaxer@BlueDiamond-2 llama.cpp % 

Output from trying to run ollama:

 %  GIN_MODE=debug OLLAMA_LLM_LIBRARY=metal ./ollama serve

2024/08/09 07:58:11 routes.go:1108: INFO server config env="map[OLLAMA_DEBUG:false OLLAMA_FLASH_ATTENTION:false OLLAMA_HOST:http://127.0.0.1:11434 OLLAMA_KEEP_ALIVE:5m0s OLLAMA_LLM_LIBRARY:metal OLLAMA_MAX_LOADED_MODELS:0 OLLAMA_MAX_QUEUE:512 OLLAMA_MODELS:/Users/davidlaxer/.ollama/models OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:0 OLLAMA_ORIGINS:[http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:* app://* file://* tauri://*] OLLAMA_RUNNERS_DIR: OLLAMA_SCHED_SPREAD:false OLLAMA_TMPDIR:]"
time=2024-08-09T07:58:11.349-07:00 level=INFO source=images.go:781 msg="total blobs: 48"
time=2024-08-09T07:58:11.354-07:00 level=INFO source=images.go:788 msg="total unused blobs removed: 0"
[GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached.

[GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production.
 - using env:   export GIN_MODE=release
 - using code:  gin.SetMode(gin.ReleaseMode)

[GIN-debug] POST   /api/pull                 --> github.com/ollama/ollama/server.(*Server).PullModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/generate             --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (5 handlers)
[GIN-debug] POST   /api/chat                 --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (5 handlers)
[GIN-debug] POST   /api/embed                --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (5 handlers)
[GIN-debug] POST   /api/embeddings           --> github.com/ollama/ollama/server.(*Server).EmbeddingsHandler-fm (5 handlers)
[GIN-debug] POST   /api/create               --> github.com/ollama/ollama/server.(*Server).CreateModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/push                 --> github.com/ollama/ollama/server.(*Server).PushModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/copy                 --> github.com/ollama/ollama/server.(*Server).CopyModelHandler-fm (5 handlers)
[GIN-debug] DELETE /api/delete               --> github.com/ollama/ollama/server.(*Server).DeleteModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/show                 --> github.com/ollama/ollama/server.(*Server).ShowModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/blobs/:digest        --> github.com/ollama/ollama/server.(*Server).CreateBlobHandler-fm (5 handlers)
[GIN-debug] HEAD   /api/blobs/:digest        --> github.com/ollama/ollama/server.(*Server).HeadBlobHandler-fm (5 handlers)
[GIN-debug] GET    /api/ps                   --> github.com/ollama/ollama/server.(*Server).ProcessHandler-fm (5 handlers)
[GIN-debug] POST   /v1/chat/completions      --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (6 handlers)
[GIN-debug] POST   /v1/completions           --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (6 handlers)
[GIN-debug] POST   /v1/embeddings            --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (6 handlers)
[GIN-debug] GET    /v1/models                --> github.com/ollama/ollama/server.(*Server).ListModelsHandler-fm (6 handlers)
[GIN-debug] GET    /v1/models/:model         --> github.com/ollama/ollama/server.(*Server).ShowModelHandler-fm (6 handlers)
[GIN-debug] GET    /                         --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers)
[GIN-debug] GET    /api/tags                 --> github.com/ollama/ollama/server.(*Server).ListModelsHandler-fm (5 handlers)
[GIN-debug] GET    /api/version              --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers)
[GIN-debug] HEAD   /                         --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers)
[GIN-debug] HEAD   /api/tags                 --> github.com/ollama/ollama/server.(*Server).ListModelsHandler-fm (5 handlers)
[GIN-debug] HEAD   /api/version              --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers)
time=2024-08-09T07:58:11.355-07:00 level=INFO source=routes.go:1155 msg="Listening on 127.0.0.1:11434 (version 0.0.0)"
time=2024-08-09T07:58:11.359-07:00 level=INFO source=payload.go:25 msg=payloadsDir payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:11.359-07:00 level=INFO source=payload.go:31 msg="extracting embedded files" payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:56 msg="gpuPayloadsDir: " payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal/ollama_llama_server
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/vulkan/ollama_llama_server
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:45 msg="Dynamic LLM libraries [metal vulkan]"
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:46 msg="Override detection logic by setting OLLAMA_LLM_LIBRARY"
time=2024-08-09T07:58:11.408-07:00 level=INFO source=gpu_darwin.go:29 msg="Using Metal GPU" gpu_info="{memInfo:{TotalMemory:0 FreeMemory:0 FreeSwap:0} Library:vulkan Variant:no vector extensions MinimumMemory:0 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:0 Name: Compute: DriverMajor:0 DriverMinor:0}"
2024-08-09 07:58:11.463 ollama[80670:16332314] Debug: Recommended Max VRAM: 17163091968 bytes
time=2024-08-09T07:58:11.464-07:00 level=INFO source=gpu_darwin.go:40 msg=GpuInfo info="{memInfo:{TotalMemory:17163091968 FreeMemory:17163091968 FreeSwap:0} Library:metal Variant:no vector extensions MinimumMemory:536870912 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:0 Name: Compute: DriverMajor:0 DriverMinor:0}"
time=2024-08-09T07:58:11.464-07:00 level=INFO source=types.go:105 msg="inference compute" id=0 library=metal compute="" driver=0.0 name="" total="16.0 GiB" available="16.0 GiB"
time=2024-08-09T07:58:17.685-07:00 level=INFO source=gpu_darwin.go:29 msg="Using Metal GPU" gpu_info="{memInfo:{TotalMemory:0 FreeMemory:0 FreeSwap:0} Library:vulkan Variant:no vector extensions MinimumMemory:0 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:0 Name: Compute: DriverMajor:0 DriverMinor:0}"
2024-08-09 07:58:17.685 ollama[80670:16332314] Debug: Recommended Max VRAM: 17163091968 bytes
time=2024-08-09T07:58:17.685-07:00 level=INFO source=gpu_darwin.go:40 msg=GpuInfo info="{memInfo:{TotalMemory:17163091968 FreeMemory:17163091968 FreeSwap:0} Library:metal Variant:no vector extensions MinimumMemory:536870912 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:0 Name: Compute: DriverMajor:0 DriverMinor:0}"
time=2024-08-09T07:58:17.704-07:00 level=INFO source=sched.go:710 msg="new model will fit in available VRAM in single GPU, loading" model=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87 gpu=0 parallel=4 available=17163091968 required="6.3 GiB"
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Total Physical Memory: 137438953472 bytes
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Page Size: 4096 bytes
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Free Count: 2486554
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Speculative Count: 166137
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Inactive Count: 13571224
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Total Free Memory: 66453155840 bytes
time=2024-08-09T07:58:17.705-07:00 level=INFO source=memory.go:309 msg="offload to metal" layers.requested=-1 layers.model=33 layers.offload=33 layers.split="" memory.available="[16.0 GiB]" memory.required.full="6.3 GiB" memory.required.partial="6.3 GiB" memory.required.kv="1.0 GiB" memory.required.allocations="[6.3 GiB]" memory.weights.total="4.7 GiB" memory.weights.repeating="4.3 GiB" memory.weights.nonrepeating="411.0 MiB" memory.graph.full="560.0 MiB" memory.graph.partial="560.0 MiB"
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:56 msg="gpuPayloadsDir: " payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal/ollama_llama_server
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/vulkan/ollama_llama_server
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:56 msg="gpuPayloadsDir: " payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal/ollama_llama_server
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/vulkan/ollama_llama_server
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:87 msg="availableServers : found" availableServers="map[metal:/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal vulkan:/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/vulkan]"
time=2024-08-09T07:58:17.705-07:00 level=INFO msg="User override" OLLAMA_LLM_LIBRARY=metal path=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal
time=2024-08-09T07:58:17.706-07:00 level=INFO msg="starting llama server" cmd="/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal/ollama_llama_server --model /Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87 --ctx-size 8192 --batch-size 512 --embedding --log-disable --n-gpu-layers 33 --parallel 4 --port 53110"
time=2024-08-09T07:58:17.709-07:00 level=INFO msg="loaded runners" count=1
time=2024-08-09T07:58:17.709-07:00 level=INFO msg="waiting for llama runner to start responding"
time=2024-08-09T07:58:17.709-07:00 level=INFO msg="waiting for server to become available" status="llm server error"
INFO [main] build info | build=3485 commit="6eeaeba1" tid="0x7ff85169edc0" timestamp=1723215498
INFO [main] system info | n_threads=8 n_threads_batch=-1 system_info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | " tid="0x7ff85169edc0" timestamp=1723215498 total_threads=16
INFO [main] HTTP server listening | hostname="127.0.0.1" n_threads_http="15" port="53110" tid="0x7ff85169edc0" timestamp=1723215498
llama_model_loader: loaded meta data with 29 key-value pairs and 291 tensors from /Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.type str              = model
llama_model_loader: - kv   2:                               general.name str              = Meta Llama 3.1 8B Instruct
llama_model_loader: - kv   3:                           general.finetune str              = Instruct
llama_model_loader: - kv   4:                           general.basename str              = Meta-Llama-3.1
llama_model_loader: - kv   5:                         general.size_label str              = 8B
llama_model_loader: - kv   6:                            general.license str              = llama3.1
llama_model_loader: - kv   7:                               general.tags arr[str,6]       = ["facebook", "meta", "pytorch", "llam...
llama_model_loader: - kv   8:                          general.languages arr[str,8]       = ["en", "de", "fr", "it", "pt", "hi", ...
llama_model_loader: - kv   9:                          llama.block_count u32              = 32
llama_model_loader: - kv  10:                       llama.context_length u32              = 131072
llama_model_loader: - kv  11:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv  12:                  llama.feed_forward_length u32              = 14336
llama_model_loader: - kv  13:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv  14:              llama.attention.head_count_kv u32              = 8
llama_model_loader: - kv  15:                       llama.rope.freq_base f32              = 500000.000000
llama_model_loader: - kv  16:     llama.attention.layer_norm_rms_epsilon f32              = 0.000010
llama_model_loader: - kv  17:                          general.file_type u32              = 2
llama_model_loader: - kv  18:                           llama.vocab_size u32              = 128256
llama_model_loader: - kv  19:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv  20:                       tokenizer.ggml.model str              = gpt2
llama_model_loader: - kv  21:                         tokenizer.ggml.pre str              = llama-bpe
llama_model_loader: - kv  22:                      tokenizer.ggml.tokens arr[str,128256]  = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv  23:                  tokenizer.ggml.token_type arr[i32,128256]  = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv  24:                      tokenizer.ggml.merges arr[str,280147]  = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv  25:                tokenizer.ggml.bos_token_id u32              = 128000
llama_model_loader: - kv  26:                tokenizer.ggml.eos_token_id u32              = 128009
llama_model_loader: - kv  27:                    tokenizer.chat_template str              = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv  28:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q4_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
time=2024-08-09T07:58:18.211-07:00 level=INFO msg="waiting for server to become available" status="llm server loading model"
llm_load_vocab: special tokens cache size = 256
llm_load_vocab: token to piece cache size = 0.7999 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = BPE
llm_load_print_meta: n_vocab          = 128256
llm_load_print_meta: n_merges         = 280147
llm_load_print_meta: vocab_only       = 0
llm_load_print_meta: n_ctx_train      = 131072
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 8
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_swa            = 0
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 4
llm_load_print_meta: n_embd_k_gqa     = 1024
llm_load_print_meta: n_embd_v_gqa     = 1024
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-05
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 14336
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 131072
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 8B
llm_load_print_meta: model ftype      = Q4_0
llm_load_print_meta: model params     = 8.03 B
llm_load_print_meta: model size       = 4.33 GiB (4.64 BPW) 
llm_load_print_meta: general.name     = Meta Llama 3.1 8B Instruct
llm_load_print_meta: BOS token        = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token        = 128009 '<|eot_id|>'
llm_load_print_meta: LF token         = 128 'Ä'
llm_load_print_meta: EOT token        = 128009 '<|eot_id|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: ggml ctx size =    0.27 MiB
DEBUG: Entering ggml_backend_metal_buffer_from_ptr with size = 4357873664
Error: MTLCreateSystemDefaultDevice() returned nil
Metal framework is available
Available Metal devices:
  AMD Radeon Pro 5700 XT
macOS Version: Version 14.6 (Build 23G80)
DEBUG: Metal device: (null), maxBufferLength: 0
DEBUG: size_aligned = 4357877760, device.maxBufferLength = 0
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer
llama_load_model_from_file: exception loading model
libc++abi: terminating due to uncaught exception of type std::runtime_error: unable to allocate backend metal buffer
time=2024-08-09T07:58:18.492-07:00 level=DEBUG msg="llama runner terminated" error="signal: abort trap"
time=2024-08-09T07:58:18.712-07:00 level=ERROR msg="error loading llama server" error="llama runner process has terminated: error:unable to allocate backend metal buffer"
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="triggering expiration for failed load" model=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="runner expired event received" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="got lock to unload" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="stopping llama server"
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="runner released" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="sending an unloaded event" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="ignoring unload event with no pending requests"
[GIN] 2024/08/09 - 07:58:18 | 500 |  1.029959264s |       127.0.0.1 | POST     "/api/embeddings"

A test program.

#import <Metal/Metal.h>
#import <Foundation/Foundation.h>

int main(int argc, const char * argv[]) {
    @autoreleasepool {
        NSLog(@"Starting Metal device creation...");
        NSLog(@"macOS Version: %@", [[NSProcessInfo processInfo] operatingSystemVersionString]);

        id<MTLDevice> device = MTLCreateSystemDefaultDevice();
        if (device) {
            NSLog(@"Successfully created Metal device: %@", device.name);
        } else {
            NSLog(@"Failed to create Metal device");
        }

        NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
        NSLog(@"Number of available Metal devices: %lu", (unsigned long)devices.count);
        for (id<MTLDevice> dev in devices) {
            NSLog(@"  %@ (Headless: %@, Low Power: %@, Removable: %@)",
                  dev.name,
                  dev.isHeadless ? @"Yes" : @"No",
                  dev.isLowPower ? @"Yes" : @"No",
                  dev.isRemovable ? @"Yes" : @"No");

            NSLog(@"    Recommended max working set size: %llu", dev.recommendedMaxWorkingSetSize);
            NSLog(@"    Max transfer rate: %llu", dev.maxTransferRate);
        }

        // Try creating a device manually
        if (devices.count > 0) {
            device = devices[0];
            NSLog(@"Manually selected device: %@", device.name);
        }

        // Check if Metal is supported
        if (@available(macOS 10.11, *)) {
            NSLog(@"Metal is supported on this OS version");
        } else {
            NSLog(@"Metal is not supported on this OS version");
        }

        id<MTLDevice> explicitDevice = MTLCreateSystemDefaultDevice();
        if (explicitDevice) {
            NSLog(@"Explicitly created Metal device: %@", explicitDevice.name);
        } else {
            NSLog(@"Failed to explicitly create Metal device");
        }
        if (device) {
            id<MTLCommandQueue> commandQueue = [device newCommandQueue];
            if (commandQueue) {
                NSLog(@"Successfully created command queue");
            } else {
                NSLog(@"Failed to create command queue");
            }
        }
    }
    return 0;
}
 % /usr/bin/clang -I. -Icommon -D_XOPEN_SOURCE=600 -D_DARWIN_C_SOURCE -DNDEBUG -DGGML_USE_ACCELERATE -DGGML_USE_BLAS -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -DGGML_USE_LLAMAFILE -DGGML_USE_METAL -D_FORTIFY_SOURCE=2 -isystem /Users/davidlaxer/anaconda3/envs/AI-Feynman/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE -fstack-protector-strong -O2 -pipe -isystem /Users/davidlaxer/anaconda3/envs/AI-Feynman/include -c test.m -o test.o

% /usr/bin/clang++ test.o -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs -Wl,-rpath,/Users/davidlaxer/anaconda3/envs/AI-Feynman/lib -L/Users/davidlaxer/anaconda3/envs/AI-Feynman/lib -o test

./test
2024-08-09 19:05:23.563 test[23150:17155609] Starting Metal device creation...
2024-08-09 19:05:23.571 test[23150:17155609] macOS Version: Version 14.6 (Build 23G80)
2024-08-09 19:05:23.585 test[23150:17155609] Failed to create Metal device
2024-08-09 19:05:23.585 test[23150:17155609] Number of available Metal devices: 1
2024-08-09 19:05:23.585 test[23150:17155609]   AMD Radeon Pro 5700 XT (Headless: No, Low Power: No, Removable: No)
2024-08-09 19:05:23.586 test[23150:17155609]     Recommended max working set size: 17163091968
2024-08-09 19:05:23.586 test[23150:17155609]     Max transfer rate: 0
2024-08-09 19:05:23.586 test[23150:17155609] Manually selected device: AMD Radeon Pro 5700 XT
2024-08-09 19:05:23.586 test[23150:17155609] Metal is supported on this OS version
2024-08-09 19:05:23.586 test[23150:17155609] Failed to explicitly create Metal device
2024-08-09 19:05:23.586 test[23150:17155609] Successfully created command queue

% otool -L test                
test:
    /System/Library/Frameworks/Foundation.framework/Versions/C/Foundation (compatibility version 300.0.0, current version 2503.1.0)
    /System/Library/Frameworks/Metal.framework/Versions/A/Metal (compatibility version 1.0.0, current version 343.19.0)
    /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1345.120.2)
    /System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation (compatibility version 150.0.0, current version 2503.1.0)
    /usr/lib/libobjc.A.dylib (compatibility version 1.0.0, current version 228.0.0)
akaraon8bit commented 2 months ago

My iMac 27" running MacOS 14.6 with an AMD Radeon Pro 5700 XT, MTLCreateSystemDefaultDevice() returning nil. Any idea why MTLCreateSystemDefaultDevice() returns nil?

The AMD GPU works with Pytorch MPS and Tensorflow Metal.

   id<MTLDevice> device = MTLCreateSystemDefaultDevice();
    GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);
...
DEBUG: Entering ggml_backend_metal_buffer_from_ptr with size = 4357873664
Error: MTLCreateSystemDefaultDevice() returned nil
Metal framework is available
Available Metal devices:
  AMD Radeon Pro 5700 XT
macOS Version: Version 14.6 (Build 23G80)
DEBUG: Metal device: (null), maxBufferLength: 0
DEBUG: size_aligned = 4357877760, device.maxBufferLength = 0
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer

Here's my debugging code:

// default Metal device (reference counted, shared by the backend)
static id<MTLDevice> g_backend_device = nil;
static int g_backend_device_ref_count = 0;

static id<MTLDevice> ggml_backend_metal_get_device(void) {
    if (g_backend_device == nil) {
        g_backend_device = MTLCreateSystemDefaultDevice();
        if (g_backend_device == nil) {
            fprintf(stderr, "Error: MTLCreateSystemDefaultDevice() returned nil\n");

            // Check if Metal is supported
            if (@available(macOS 10.11, *)) {
                fprintf(stderr, "Metal framework is available\n");
            } else {
                fprintf(stderr, "Metal framework is not available on this system\n");
            }

            // List available devices
            NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
            fprintf(stderr, "Available Metal devices:\n");
            for (id<MTLDevice> device in devices) {
                fprintf(stderr, "  %s\n", device.name.UTF8String);
            }

            // Additional system info
            fprintf(stderr, "macOS Version: %s\n", [[[NSProcessInfo processInfo] operatingSystemVersionString] UTF8String]);
        } else {
            fprintf(stderr, "Successfully created Metal device: %s\n", g_backend_device.name.UTF8String);
        }
    }
    g_backend_device_ref_count++;
    return g_backend_device;
}

static void ggml_backend_metal_free_device(void) {
    assert(g_backend_device_ref_count > 0);

    g_backend_device_ref_count--;

    if (g_backend_device_ref_count == 0) {
        [g_backend_device release];
        g_backend_device = nil;
    }
}
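
A minimal sketch of a possible fallback, for illustration only: when MTLCreateSystemDefaultDevice() returns nil, enumerate MTLCopyAllDevices() and pick a device explicitly. Whether ggml's Metal backend would actually accept a device obtained this way is untested here, and pick_metal_device is a hypothetical helper, not existing llama.cpp code.

#import <Metal/Metal.h>
#import <Foundation/Foundation.h>

// Hypothetical fallback: prefer the system default device, otherwise pick one
// from the full device list (optionally matching a name substring).
static id<MTLDevice> pick_metal_device(NSString *preferredName) {
    id<MTLDevice> device = MTLCreateSystemDefaultDevice();
    if (device != nil) {
        return device;
    }
    // Default creation failed (as in the logs above), so enumerate instead.
    NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
    for (id<MTLDevice> candidate in devices) {
        if (preferredName != nil && [candidate.name containsString:preferredName]) {
            return candidate;
        }
    }
    return devices.firstObject; // may still be nil if no Metal device exists
}

int main(void) {
    @autoreleasepool {
        // "Radeon" is only an illustrative substring for this machine.
        id<MTLDevice> dev = pick_metal_device(@"Radeon");
        NSLog(@"selected device: %@", dev ? dev.name : @"(none)");
    }
    return 0;
}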

Here are my system parameters.

 % system_profiler SPDisplaysDataType
Graphics/Displays:

    AMD Radeon Pro 5700 XT:

      Chipset Model: AMD Radeon Pro 5700 XT
      Type: GPU
      Bus: PCIe
      PCIe Lane Width: x16
      VRAM (Total): 16 GB
      Vendor: AMD (0x1002)
      Device ID: 0x7319
      Revision ID: 0x0040
      ROM Revision: 113-D1820Q-231
      VBIOS Version: 113-D182A2XT-013
      Option ROM Version: 113-D182A2XT-013
      EFI Driver Version: 01.01.231
      Metal Support: Metal 3
      Displays:
        iMac:
          Display Type: Built-In Retina LCD
          Resolution: Retina 5K (5120 x 2880)
          Framebuffer Depth: 30-Bit Color (ARGB2101010)
          Main Display: Yes
          Mirror: Off
          Online: Yes
          Automatically Adjust Brightness: Yes
          Connection Type: Internal

(ai) davidlaxer@BlueDiamond-2 clamav % sw_vers    
ProductName:      macOS
ProductVersion:       14.6
BuildVersion:     23G80

Output trying to run llama.cpp:

 % ./llama-cli -m ./models/7B/ggml-model-q4_0-v2.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt

Log start
main: build = 3152 (7b2f4a7d)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.5.0
main: seed  = 1723228333
llama_model_loader: loaded meta data with 16 key-value pairs and 291 tensors from ./models/7B/ggml-model-q4_0-v2.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = models
llama_model_loader: - kv   2:                       llama.context_length u32              = 2048
llama_model_loader: - kv   3:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv   4:                          llama.block_count u32              = 32
llama_model_loader: - kv   5:                  llama.feed_forward_length u32              = 11008
llama_model_loader: - kv   6:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   7:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv   8:              llama.attention.head_count_kv u32              = 32
llama_model_loader: - kv   9:     llama.attention.layer_norm_rms_epsilon f32              = 0.000001
llama_model_loader: - kv  10:                          general.file_type u32              = 2
llama_model_loader: - kv  11:                       tokenizer.ggml.model str              = llama
llama_model_loader: - kv  12:                      tokenizer.ggml.tokens arr[str,32000]   = ["<unk>", "<s>", "</s>", "<0x00>", "<...
llama_model_loader: - kv  13:                      tokenizer.ggml.scores arr[f32,32000]   = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  14:                  tokenizer.ggml.token_type arr[i32,32000]   = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
llama_model_loader: - kv  15:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q4_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
llm_load_vocab: special tokens cache size = 259
llm_load_vocab: token to piece cache size = 0.1684 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = SPM
llm_load_print_meta: n_vocab          = 32000
llm_load_print_meta: n_merges         = 0
llm_load_print_meta: n_ctx_train      = 2048
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 32
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 1
llm_load_print_meta: n_embd_k_gqa     = 4096
llm_load_print_meta: n_embd_v_gqa     = 4096
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-06
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 11008
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 2048
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 7B
llm_load_print_meta: model ftype      = Q4_0
llm_load_print_meta: model params     = 6.74 B
llm_load_print_meta: model size       = 3.56 GiB (4.54 BPW) 
llm_load_print_meta: general.name     = models
llm_load_print_meta: BOS token        = 1 '<s>'
llm_load_print_meta: EOS token        = 2 '</s>'
llm_load_print_meta: UNK token        = 0 '<unk>'
llm_load_print_meta: LF token         = 13 '<0x0A>'
llm_load_tensors: ggml ctx size =    0.30 MiB
max_size: 107520000
Error: MTLCreateSystemDefaultDevice() returned nil
Metal framework is available
Available Metal devices:
  AMD Radeon Pro 5700 XT
macOS Version: Version 14.6 (Build 23G80)
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer
llama_load_model_from_file: failed to load model
llama_init_from_gpt_params: error: failed to load model './models/7B/ggml-model-q4_0-v2.gguf'
main: error: unable to load model
(AI-Feynman) davidlaxer@BlueDiamond-2 llama.cpp % 

Output trying to run ollama:

 %  GIN_MODE=debug OLLAMA_LLM_LIBRARY=metal ./ollama serve

2024/08/09 07:58:11 routes.go:1108: INFO server config env="map[OLLAMA_DEBUG:false OLLAMA_FLASH_ATTENTION:false OLLAMA_HOST:http://127.0.0.1:11434 OLLAMA_KEEP_ALIVE:5m0s OLLAMA_LLM_LIBRARY:metal OLLAMA_MAX_LOADED_MODELS:0 OLLAMA_MAX_QUEUE:512 OLLAMA_MODELS:/Users/davidlaxer/.ollama/models OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:0 OLLAMA_ORIGINS:[http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:* app://* file://* tauri://*] OLLAMA_RUNNERS_DIR: OLLAMA_SCHED_SPREAD:false OLLAMA_TMPDIR:]"
time=2024-08-09T07:58:11.349-07:00 level=INFO source=images.go:781 msg="total blobs: 48"
time=2024-08-09T07:58:11.354-07:00 level=INFO source=images.go:788 msg="total unused blobs removed: 0"
[GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached.

[GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production.
 - using env: export GIN_MODE=release
 - using code:    gin.SetMode(gin.ReleaseMode)

[GIN-debug] POST   /api/pull                 --> github.com/ollama/ollama/server.(*Server).PullModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/generate             --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (5 handlers)
[GIN-debug] POST   /api/chat                 --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (5 handlers)
[GIN-debug] POST   /api/embed                --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (5 handlers)
[GIN-debug] POST   /api/embeddings           --> github.com/ollama/ollama/server.(*Server).EmbeddingsHandler-fm (5 handlers)
[GIN-debug] POST   /api/create               --> github.com/ollama/ollama/server.(*Server).CreateModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/push                 --> github.com/ollama/ollama/server.(*Server).PushModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/copy                 --> github.com/ollama/ollama/server.(*Server).CopyModelHandler-fm (5 handlers)
[GIN-debug] DELETE /api/delete               --> github.com/ollama/ollama/server.(*Server).DeleteModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/show                 --> github.com/ollama/ollama/server.(*Server).ShowModelHandler-fm (5 handlers)
[GIN-debug] POST   /api/blobs/:digest        --> github.com/ollama/ollama/server.(*Server).CreateBlobHandler-fm (5 handlers)
[GIN-debug] HEAD   /api/blobs/:digest        --> github.com/ollama/ollama/server.(*Server).HeadBlobHandler-fm (5 handlers)
[GIN-debug] GET    /api/ps                   --> github.com/ollama/ollama/server.(*Server).ProcessHandler-fm (5 handlers)
[GIN-debug] POST   /v1/chat/completions      --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (6 handlers)
[GIN-debug] POST   /v1/completions           --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (6 handlers)
[GIN-debug] POST   /v1/embeddings            --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (6 handlers)
[GIN-debug] GET    /v1/models                --> github.com/ollama/ollama/server.(*Server).ListModelsHandler-fm (6 handlers)
[GIN-debug] GET    /v1/models/:model         --> github.com/ollama/ollama/server.(*Server).ShowModelHandler-fm (6 handlers)
[GIN-debug] GET    /                         --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers)
[GIN-debug] GET    /api/tags                 --> github.com/ollama/ollama/server.(*Server).ListModelsHandler-fm (5 handlers)
[GIN-debug] GET    /api/version              --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers)
[GIN-debug] HEAD   /                         --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers)
[GIN-debug] HEAD   /api/tags                 --> github.com/ollama/ollama/server.(*Server).ListModelsHandler-fm (5 handlers)
[GIN-debug] HEAD   /api/version              --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers)
time=2024-08-09T07:58:11.355-07:00 level=INFO source=routes.go:1155 msg="Listening on 127.0.0.1:11434 (version 0.0.0)"
time=2024-08-09T07:58:11.359-07:00 level=INFO source=payload.go:25 msg=payloadsDir payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:11.359-07:00 level=INFO source=payload.go:31 msg="extracting embedded files" payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:56 msg="gpuPayloadsDir: " payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal/ollama_llama_server
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/vulkan/ollama_llama_server
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:45 msg="Dynamic LLM libraries [metal vulkan]"
time=2024-08-09T07:58:11.408-07:00 level=INFO source=payload.go:46 msg="Override detection logic by setting OLLAMA_LLM_LIBRARY"
time=2024-08-09T07:58:11.408-07:00 level=INFO source=gpu_darwin.go:29 msg="Using Metal GPU" gpu_info="{memInfo:{TotalMemory:0 FreeMemory:0 FreeSwap:0} Library:vulkan Variant:no vector extensions MinimumMemory:0 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:0 Name: Compute: DriverMajor:0 DriverMinor:0}"
2024-08-09 07:58:11.463 ollama[80670:16332314] Debug: Recommended Max VRAM: 17163091968 bytes
time=2024-08-09T07:58:11.464-07:00 level=INFO source=gpu_darwin.go:40 msg=GpuInfo info="{memInfo:{TotalMemory:17163091968 FreeMemory:17163091968 FreeSwap:0} Library:metal Variant:no vector extensions MinimumMemory:536870912 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:0 Name: Compute: DriverMajor:0 DriverMinor:0}"
time=2024-08-09T07:58:11.464-07:00 level=INFO source=types.go:105 msg="inference compute" id=0 library=metal compute="" driver=0.0 name="" total="16.0 GiB" available="16.0 GiB"
time=2024-08-09T07:58:17.685-07:00 level=INFO source=gpu_darwin.go:29 msg="Using Metal GPU" gpu_info="{memInfo:{TotalMemory:0 FreeMemory:0 FreeSwap:0} Library:vulkan Variant:no vector extensions MinimumMemory:0 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:0 Name: Compute: DriverMajor:0 DriverMinor:0}"
2024-08-09 07:58:17.685 ollama[80670:16332314] Debug: Recommended Max VRAM: 17163091968 bytes
time=2024-08-09T07:58:17.685-07:00 level=INFO source=gpu_darwin.go:40 msg=GpuInfo info="{memInfo:{TotalMemory:17163091968 FreeMemory:17163091968 FreeSwap:0} Library:metal Variant:no vector extensions MinimumMemory:536870912 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:0 Name: Compute: DriverMajor:0 DriverMinor:0}"
time=2024-08-09T07:58:17.704-07:00 level=INFO source=sched.go:710 msg="new model will fit in available VRAM in single GPU, loading" model=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87 gpu=0 parallel=4 available=17163091968 required="6.3 GiB"
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Total Physical Memory: 137438953472 bytes
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Page Size: 4096 bytes
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Free Count: 2486554
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Speculative Count: 166137
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Inactive Count: 13571224
2024-08-09 07:58:17.705 ollama[80670:16332310] Debug: Total Free Memory: 66453155840 bytes
time=2024-08-09T07:58:17.705-07:00 level=INFO source=memory.go:309 msg="offload to metal" layers.requested=-1 layers.model=33 layers.offload=33 layers.split="" memory.available="[16.0 GiB]" memory.required.full="6.3 GiB" memory.required.partial="6.3 GiB" memory.required.kv="1.0 GiB" memory.required.allocations="[6.3 GiB]" memory.weights.total="4.7 GiB" memory.weights.repeating="4.3 GiB" memory.weights.nonrepeating="411.0 MiB" memory.graph.full="560.0 MiB" memory.graph.partial="560.0 MiB"
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:56 msg="gpuPayloadsDir: " payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal/ollama_llama_server
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/vulkan/ollama_llama_server
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:56 msg="gpuPayloadsDir: " payloadsDir=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal/ollama_llama_server
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:74 msg="Available servers found" file=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/vulkan/ollama_llama_server
time=2024-08-09T07:58:17.705-07:00 level=INFO source=payload.go:87 msg="availableServers : found" availableServers="map[metal:/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal vulkan:/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/vulkan]"
time=2024-08-09T07:58:17.705-07:00 level=INFO msg="User override" OLLAMA_LLM_LIBRARY=metal path=/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal
time=2024-08-09T07:58:17.706-07:00 level=INFO msg="starting llama server" cmd="/var/folders/3n/56fpv14n4wj0c1l1sb106pzw0000gn/T/ollama4175290649/runners/metal/ollama_llama_server --model /Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87 --ctx-size 8192 --batch-size 512 --embedding --log-disable --n-gpu-layers 33 --parallel 4 --port 53110"
time=2024-08-09T07:58:17.709-07:00 level=INFO msg="loaded runners" count=1
time=2024-08-09T07:58:17.709-07:00 level=INFO msg="waiting for llama runner to start responding"
time=2024-08-09T07:58:17.709-07:00 level=INFO msg="waiting for server to become available" status="llm server error"
INFO [main] build info | build=3485 commit="6eeaeba1" tid="0x7ff85169edc0" timestamp=1723215498
INFO [main] system info | n_threads=8 n_threads_batch=-1 system_info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | " tid="0x7ff85169edc0" timestamp=1723215498 total_threads=16
INFO [main] HTTP server listening | hostname="127.0.0.1" n_threads_http="15" port="53110" tid="0x7ff85169edc0" timestamp=1723215498
llama_model_loader: loaded meta data with 29 key-value pairs and 291 tensors from /Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.type str              = model
llama_model_loader: - kv   2:                               general.name str              = Meta Llama 3.1 8B Instruct
llama_model_loader: - kv   3:                           general.finetune str              = Instruct
llama_model_loader: - kv   4:                           general.basename str              = Meta-Llama-3.1
llama_model_loader: - kv   5:                         general.size_label str              = 8B
llama_model_loader: - kv   6:                            general.license str              = llama3.1
llama_model_loader: - kv   7:                               general.tags arr[str,6]       = ["facebook", "meta", "pytorch", "llam...
llama_model_loader: - kv   8:                          general.languages arr[str,8]       = ["en", "de", "fr", "it", "pt", "hi", ...
llama_model_loader: - kv   9:                          llama.block_count u32              = 32
llama_model_loader: - kv  10:                       llama.context_length u32              = 131072
llama_model_loader: - kv  11:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv  12:                  llama.feed_forward_length u32              = 14336
llama_model_loader: - kv  13:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv  14:              llama.attention.head_count_kv u32              = 8
llama_model_loader: - kv  15:                       llama.rope.freq_base f32              = 500000.000000
llama_model_loader: - kv  16:     llama.attention.layer_norm_rms_epsilon f32              = 0.000010
llama_model_loader: - kv  17:                          general.file_type u32              = 2
llama_model_loader: - kv  18:                           llama.vocab_size u32              = 128256
llama_model_loader: - kv  19:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv  20:                       tokenizer.ggml.model str              = gpt2
llama_model_loader: - kv  21:                         tokenizer.ggml.pre str              = llama-bpe
llama_model_loader: - kv  22:                      tokenizer.ggml.tokens arr[str,128256]  = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv  23:                  tokenizer.ggml.token_type arr[i32,128256]  = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv  24:                      tokenizer.ggml.merges arr[str,280147]  = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv  25:                tokenizer.ggml.bos_token_id u32              = 128000
llama_model_loader: - kv  26:                tokenizer.ggml.eos_token_id u32              = 128009
llama_model_loader: - kv  27:                    tokenizer.chat_template str              = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv  28:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q4_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
time=2024-08-09T07:58:18.211-07:00 level=INFO msg="waiting for server to become available" status="llm server loading model"
llm_load_vocab: special tokens cache size = 256
llm_load_vocab: token to piece cache size = 0.7999 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = BPE
llm_load_print_meta: n_vocab          = 128256
llm_load_print_meta: n_merges         = 280147
llm_load_print_meta: vocab_only       = 0
llm_load_print_meta: n_ctx_train      = 131072
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 8
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_swa            = 0
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 4
llm_load_print_meta: n_embd_k_gqa     = 1024
llm_load_print_meta: n_embd_v_gqa     = 1024
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-05
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 14336
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 131072
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 8B
llm_load_print_meta: model ftype      = Q4_0
llm_load_print_meta: model params     = 8.03 B
llm_load_print_meta: model size       = 4.33 GiB (4.64 BPW) 
llm_load_print_meta: general.name     = Meta Llama 3.1 8B Instruct
llm_load_print_meta: BOS token        = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token        = 128009 '<|eot_id|>'
llm_load_print_meta: LF token         = 128 'Ä'
llm_load_print_meta: EOT token        = 128009 '<|eot_id|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: ggml ctx size =    0.27 MiB
DEBUG: Entering ggml_backend_metal_buffer_from_ptr with size = 4357873664
Error: MTLCreateSystemDefaultDevice() returned nil
Metal framework is available
Available Metal devices:
  AMD Radeon Pro 5700 XT
macOS Version: Version 14.6 (Build 23G80)
DEBUG: Metal device: (null), maxBufferLength: 0
DEBUG: size_aligned = 4357877760, device.maxBufferLength = 0
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer
llama_load_model_from_file: exception loading model
libc++abi: terminating due to uncaught exception of type std::runtime_error: unable to allocate backend metal buffer
time=2024-08-09T07:58:18.492-07:00 level=DEBUG msg="llama runner terminated" error="signal: abort trap"
time=2024-08-09T07:58:18.712-07:00 level=ERROR msg="error loading llama server" error="llama runner process has terminated: error:unable to allocate backend metal buffer"
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="triggering expiration for failed load" model=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="runner expired event received" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="got lock to unload" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="stopping llama server"
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="runner released" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="sending an unloaded event" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="ignoring unload event with no pending requests"
[GIN] 2024/08/09 - 07:58:18 | 500 |  1.029959264s |       127.0.0.1 | POST     "/api/embeddings"

git clone -b metalmultipleGPUsupport  https://github.com/ifeanyipossibilities/llama.cpp
mkdir build && cd build
cmake .. -DGGML_METAL=ON
cmake --build . --config Release

./llama-cli --main-gpu 0 ...
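
For reference, --main-gpu takes an index into the enumerated device list, so on a machine with both an integrated and a discrete GPU, index 1 would presumably select the second listed device once that branch is built. A hypothetical invocation (the model path is a placeholder, not taken from this thread):

./llama-cli -m /path/to/model.gguf -c 2048 --n-gpu-layers 999 --main-gpu 1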
llm_load_print_meta: EOS token        = 128009 '<|eot_id|>'
llm_load_print_meta: LF token         = 128 'Ä'
llm_load_print_meta: EOT token        = 128009 '<|eot_id|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: ggml ctx size =    0.27 MiB
DEBUG: Entering ggml_backend_metal_buffer_from_ptr with size = 4357873664
Error: MTLCreateSystemDefaultDevice() returned nil
Metal framework is available
Available Metal devices:
  AMD Radeon Pro 5700 XT
macOS Version: Version 14.6 (Build 23G80)
DEBUG: Metal device: (null), maxBufferLength: 0
DEBUG: size_aligned = 4357877760, device.maxBufferLength = 0
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer
llama_load_model_from_file: exception loading model
libc++abi: terminating due to uncaught exception of type std::runtime_error: unable to allocate backend metal buffer
time=2024-08-09T07:58:18.492-07:00 level=DEBUG msg="llama runner terminated" error="signal: abort trap"
time=2024-08-09T07:58:18.712-07:00 level=ERROR msg="error loading llama server" error="llama runner process has terminated: error:unable to allocate backend metal buffer"
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="triggering expiration for failed load" model=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="runner expired event received" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.712-07:00 level=DEBUG msg="got lock to unload" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="stopping llama server"
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="runner released" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="sending an unloaded event" modelPath=/Users/davidlaxer/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87
time=2024-08-09T07:58:18.713-07:00 level=DEBUG msg="ignoring unload event with no pending requests"
[GIN] 2024/08/09 - 07:58:18 | 500 |  1.029959264s |       127.0.0.1 | POST     "/api/embeddings"

A test program:

#import <Metal/Metal.h>
#import <Foundation/Foundation.h>

int main(int argc, const char * argv[]) {
    @autoreleasepool {
        NSLog(@"Starting Metal device creation...");
        NSLog(@"macOS Version: %@", [[NSProcessInfo processInfo] operatingSystemVersionString]);

        id<MTLDevice> device = MTLCreateSystemDefaultDevice();
        if (device) {
            NSLog(@"Successfully created Metal device: %@", device.name);
        } else {
            NSLog(@"Failed to create Metal device");
        }

        NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
        NSLog(@"Number of available Metal devices: %lu", (unsigned long)devices.count);
        for (id<MTLDevice> dev in devices) {
            NSLog(@"  %@ (Headless: %@, Low Power: %@, Removable: %@)",
                  dev.name,
                  dev.isHeadless ? @"Yes" : @"No",
                  dev.isLowPower ? @"Yes" : @"No",
                  dev.isRemovable ? @"Yes" : @"No");

            NSLog(@"    Recommended max working set size: %llu", dev.recommendedMaxWorkingSetSize);
            NSLog(@"    Max transfer rate: %llu", dev.maxTransferRate);
        }

        // Try creating a device manually
        if (devices.count > 0) {
            device = devices[0];
            NSLog(@"Manually selected device: %@", device.name);
        }

        // Check if Metal is supported
        if (@available(macOS 10.11, *)) {
            NSLog(@"Metal is supported on this OS version");
        } else {
            NSLog(@"Metal is not supported on this OS version");
        }

        id<MTLDevice> explicitDevice = MTLCreateSystemDefaultDevice();
        if (explicitDevice) {
            NSLog(@"Explicitly created Metal device: %@", explicitDevice.name);
        } else {
            NSLog(@"Failed to explicitly create Metal device");
        }
        if (device) {
            id<MTLCommandQueue> commandQueue = [device newCommandQueue];
            if (commandQueue) {
                NSLog(@"Successfully created command queue");
            } else {
                NSLog(@"Failed to create command queue");
            }
        }
    }
    return 0;
}
 % /usr/bin/clang -I. -Icommon -D_XOPEN_SOURCE=600 -D_DARWIN_C_SOURCE -DNDEBUG -DGGML_USE_ACCELERATE -DGGML_USE_BLAS -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -DGGML_USE_LLAMAFILE -DGGML_USE_METAL -D_FORTIFY_SOURCE=2 -isystem /Users/davidlaxer/anaconda3/envs/AI-Feynman/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE -fstack-protector-strong -O2 -pipe -isystem /Users/davidlaxer/anaconda3/envs/AI-Feynman/include -c test.m -o test.o

% /usr/bin/clang++ test.o -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs -Wl,-rpath,/Users/davidlaxer/anaconda3/envs/AI-Feynman/lib -L/Users/davidlaxer/anaconda3/envs/AI-Feynman/lib -o test

./test
2024-08-09 19:05:23.563 test[23150:17155609] Starting Metal device creation...
2024-08-09 19:05:23.571 test[23150:17155609] macOS Version: Version 14.6 (Build 23G80)
2024-08-09 19:05:23.585 test[23150:17155609] Failed to create Metal device
2024-08-09 19:05:23.585 test[23150:17155609] Number of available Metal devices: 1
2024-08-09 19:05:23.585 test[23150:17155609]   AMD Radeon Pro 5700 XT (Headless: No, Low Power: No, Removable: No)
2024-08-09 19:05:23.586 test[23150:17155609]     Recommended max working set size: 17163091968
2024-08-09 19:05:23.586 test[23150:17155609]     Max transfer rate: 0
2024-08-09 19:05:23.586 test[23150:17155609] Manually selected device: AMD Radeon Pro 5700 XT
2024-08-09 19:05:23.586 test[23150:17155609] Metal is supported on this OS version
2024-08-09 19:05:23.586 test[23150:17155609] Failed to explicitly create Metal device
2024-08-09 19:05:23.586 test[23150:17155609] Successfully created command queue

% otool -L test                
test:
  /System/Library/Frameworks/Foundation.framework/Versions/C/Foundation (compatibility version 300.0.0, current version 2503.1.0)
  /System/Library/Frameworks/Metal.framework/Versions/A/Metal (compatibility version 1.0.0, current version 343.19.0)
  /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1345.120.2)
  /System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation (compatibility version 150.0.0, current version 2503.1.0)
  /usr/lib/libobjc.A.dylib (compatibility version 1.0.0, current version 228.0.0)

Try this:

git clone -b metalmultipleGPUsupport https://github.com/ifeanyipossibilities/llama.cpp
cd llama.cpp
mkdir build && cd build
cmake .. -DGGML_METAL=ON
cmake --build . --config Release
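
If it is unclear which index to pass to --main-gpu, the short sketch below simply prints every Metal device the OS exposes, with its position in the MTLCopyAllDevices() order. Whether the backend on that branch numbers GPUs in the same order is an assumption on my part, so treat the indices as a starting point only.

#import <Metal/Metal.h>
#import <Foundation/Foundation.h>

// List every visible Metal device with its index in MTLCopyAllDevices() order.
// The mapping of these indices to --main-gpu is assumed, not verified here.
int main(void) {
    @autoreleasepool {
        NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
        [devices enumerateObjectsUsingBlock:^(id<MTLDevice> dev, NSUInteger idx, BOOL *stop) {
            (void)stop;
            NSLog(@"index %lu: %@ (low power: %@, headless: %@)",
                  (unsigned long)idx, dev.name,
                  dev.isLowPower ? @"yes" : @"no",
                  dev.isHeadless ? @"yes" : @"no");
        }];
    }
    return 0;
}

It builds the same way as the test.m programs in this thread (clang with -framework Metal -framework Foundation).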
eriktrom commented 2 months ago

thank you @akaraon8bit for communicating what seems to be difficult for apple to do themselves :)

dbl001 commented 2 months ago

@akaraon8bit Thanks for your response. Same error with your branch.

% cmake .. -DGGML_METAL=ON        
-- The C compiler identification is AppleClang 15.0.0.15000309
-- The CXX compiler identification is AppleClang 15.0.0.15000309
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found Git: /opt/local/bin/git (found version "2.46.0")
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE
-- Accelerate framework found
-- Metal framework found
-- The ASM compiler identification is AppleClang
-- Found assembler: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
-- Found OpenMP_C: -Xclang -fopenmp (found version "5.0")
-- Found OpenMP_CXX: -Xclang -fopenmp (found version "5.0")
-- Found OpenMP: TRUE (found version "5.0")
-- OpenMP found
-- Looking for dgemm_
-- Looking for dgemm_ - found
-- Found BLAS: /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Accelerate.framework
-- BLAS found, Libraries: /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Accelerate.framework
-- BLAS found, Includes: 
-- Using llamafile
-- Warning: ccache not found - consider installing it for faster compilation or disable this warning with GGML_CCACHE=OFF
-- CMAKE_SYSTEM_PROCESSOR: x86_64
-- x86 detected
-- Configuring done (10.4s)
-- Generating done (0.5s)
-- Build files have been written to: /Users/davidlaxer/llama.cpp/build
% ./bin//llama-cli -m ~/llama.cpp-orig/models/7B/ggml-model-q4_0-v2.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:"                             
Log start
main: build = 3563 (066996d2)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.6.0
main: seed  = 1723308999
llama_model_loader: loaded meta data with 16 key-value pairs and 291 tensors from /Users/davidlaxer/llama.cpp-orig/models/7B/ggml-model-q4_0-v2.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = models
llama_model_loader: - kv   2:                       llama.context_length u32              = 2048
llama_model_loader: - kv   3:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv   4:                          llama.block_count u32              = 32
llama_model_loader: - kv   5:                  llama.feed_forward_length u32              = 11008
llama_model_loader: - kv   6:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   7:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv   8:              llama.attention.head_count_kv u32              = 32
llama_model_loader: - kv   9:     llama.attention.layer_norm_rms_epsilon f32              = 0.000001
llama_model_loader: - kv  10:                          general.file_type u32              = 2
llama_model_loader: - kv  11:                       tokenizer.ggml.model str              = llama
llama_model_loader: - kv  12:                      tokenizer.ggml.tokens arr[str,32000]   = ["<unk>", "<s>", "</s>", "<0x00>", "<...
llama_model_loader: - kv  13:                      tokenizer.ggml.scores arr[f32,32000]   = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  14:                  tokenizer.ggml.token_type arr[i32,32000]   = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
llama_model_loader: - kv  15:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q4_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
llm_load_vocab: special tokens cache size = 3
llm_load_vocab: token to piece cache size = 0.1684 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = SPM
llm_load_print_meta: n_vocab          = 32000
llm_load_print_meta: n_merges         = 0
llm_load_print_meta: vocab_only       = 0
llm_load_print_meta: n_ctx_train      = 2048
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 32
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_swa            = 0
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 1
llm_load_print_meta: n_embd_k_gqa     = 4096
llm_load_print_meta: n_embd_v_gqa     = 4096
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-06
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 11008
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 2048
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 7B
llm_load_print_meta: model ftype      = Q4_0
llm_load_print_meta: model params     = 6.74 B
llm_load_print_meta: model size       = 3.56 GiB (4.54 BPW) 
llm_load_print_meta: general.name     = models
llm_load_print_meta: BOS token        = 1 '<s>'
llm_load_print_meta: EOS token        = 2 '</s>'
llm_load_print_meta: UNK token        = 0 '<unk>'
llm_load_print_meta: LF token         = 13 '<0x0A>'
llm_load_print_meta: max token length = 48
llm_load_tensors: ggml ctx size =    0.27 MiB
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer
llama_load_model_from_file: failed to load model
llama_init_from_gpt_params: error: failed to load model '/Users/davidlaxer/llama.cpp-orig/models/7B/ggml-model-q4_0-v2.gguf'
main: error: unable to load model

The simple test.m program fails, llama.cpp fails, but PyTorch works. Curious.

ifeanyipossibilities commented 2 months ago

@akaraon8bit Thanks for your response. Same error with your branch.

[quoted from the previous comment: the same cmake output and llama-cli failure log]

Use the flag --main-gpu 0

dbl001 commented 2 months ago
% ./bin//llama-cli -m ~/llama.cpp-orig/models/7B/ggml-model-q4_0-v2.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" --main-gpu 0
warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the main GPU has no effect.
Log start
main: build = 3563 (066996d2)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.6.0
main: seed  = 1723310502
llama_model_loader: loaded meta data with 16 key-value pairs and 291 tensors from /Users/davidlaxer/llama.cpp-orig/models/7B/ggml-model-q4_0-v2.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = models
llama_model_loader: - kv   2:                       llama.context_length u32              = 2048
llama_model_loader: - kv   3:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv   4:                          llama.block_count u32              = 32
llama_model_loader: - kv   5:                  llama.feed_forward_length u32              = 11008
llama_model_loader: - kv   6:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   7:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv   8:              llama.attention.head_count_kv u32              = 32
llama_model_loader: - kv   9:     llama.attention.layer_norm_rms_epsilon f32              = 0.000001
llama_model_loader: - kv  10:                          general.file_type u32              = 2
llama_model_loader: - kv  11:                       tokenizer.ggml.model str              = llama
llama_model_loader: - kv  12:                      tokenizer.ggml.tokens arr[str,32000]   = ["<unk>", "<s>", "</s>", "<0x00>", "<...
llama_model_loader: - kv  13:                      tokenizer.ggml.scores arr[f32,32000]   = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  14:                  tokenizer.ggml.token_type arr[i32,32000]   = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
llama_model_loader: - kv  15:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q4_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
llm_load_vocab: special tokens cache size = 3
llm_load_vocab: token to piece cache size = 0.1684 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = SPM
llm_load_print_meta: n_vocab          = 32000
llm_load_print_meta: n_merges         = 0
llm_load_print_meta: vocab_only       = 0
llm_load_print_meta: n_ctx_train      = 2048
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 32
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_swa            = 0
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 1
llm_load_print_meta: n_embd_k_gqa     = 4096
llm_load_print_meta: n_embd_v_gqa     = 4096
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-06
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 11008
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 2048
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 7B
llm_load_print_meta: model ftype      = Q4_0
llm_load_print_meta: model params     = 6.74 B
llm_load_print_meta: model size       = 3.56 GiB (4.54 BPW) 
llm_load_print_meta: general.name     = models
llm_load_print_meta: BOS token        = 1 '<s>'
llm_load_print_meta: EOS token        = 2 '</s>'
llm_load_print_meta: UNK token        = 0 '<unk>'
llm_load_print_meta: LF token         = 13 '<0x0A>'
llm_load_print_meta: max token length = 48
llm_load_tensors: ggml ctx size =    0.27 MiB
ggml_backend_metal_buffer_from_ptr: error: failed to allocate buffer, size =     0.00 MiB
llama_model_load: error loading model: unable to allocate backend metal buffer
llama_load_model_from_file: failed to load model
llama_init_from_gpt_params: error: failed to load model '/Users/davidlaxer/llama.cpp-orig/models/7B/ggml-model-q4_0-v2.gguf'
main: error: unable to load model
dbl001 commented 2 months ago

What happens on your Mac if you compile, link and run my test.m?

#import <Metal/Metal.h>
#import <Foundation/Foundation.h>
#import <CoreGraphics/CoreGraphics.h>
#import <dlfcn.h>

static id<MTLDevice> createCustomMTLDevice(void) {
    NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
    for (id<MTLDevice> dev in devices) {
        if (![dev isLowPower] && ![dev isHeadless]) {
            return dev;
        }
    }
    return nil;
}

int main(int argc, const char * argv[]) {
    (void)argc; // Suppress unused parameter warning
    (void)argv; // Suppress unused parameter warning

    @autoreleasepool {
        NSLog(@"Starting Metal framework loading...");
        void* metalHandle = dlopen("/System/Library/Frameworks/Metal.framework/Metal", RTLD_LAZY);
        if (!metalHandle) {
            NSLog(@"Failed to load Metal framework: %s", dlerror());
            return 1;
        }
        NSLog(@"Successfully loaded Metal framework");

        CGRect dummyRect = CGRectMake(0, 0, 100, 100);
        NSLog(@"Dummy CGRect: %@", NSStringFromRect(*(NSRect *)&dummyRect));

        NSLog(@"macOS Version: %@", [[NSProcessInfo processInfo] operatingSystemVersionString]);

        NSLog(@"About to call MTLCreateSystemDefaultDevice()");
        id<MTLDevice> device = MTLCreateSystemDefaultDevice();
        if (device) {
            NSLog(@"Successfully created Metal device: %@", device.name);
        } else {
            NSError *error = [NSError errorWithDomain:NSOSStatusErrorDomain code:0 userInfo:nil];
            NSLog(@"Failed to create Metal device. Error domain: %@, code: %ld", error.domain, (long)error.code);
        }

        NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
        NSLog(@"Number of available Metal devices: %lu", (unsigned long)devices.count);
        for (id<MTLDevice> dev in devices) {
            NSLog(@"  %@ (Headless: %@, Low Power: %@, Removable: %@)",
                  dev.name,
                  dev.isHeadless ? @"Yes" : @"No",
                  dev.isLowPower ? @"Yes" : @"No",
                  dev.isRemovable ? @"Yes" : @"No");

            NSLog(@"    Recommended max working set size: %llu", dev.recommendedMaxWorkingSetSize);
            NSLog(@"    Max transfer rate: %llu", dev.maxTransferRate);
        }

        if (devices.count > 0) {
            device = devices[0];
            NSLog(@"Manually selected device: %@", device.name);
        }

        if (@available(macOS 10.11, *)) {
            NSLog(@"Metal is supported on this OS version");
        } else {
            NSLog(@"Metal is not supported on this OS version");
        }

        id<MTLDevice> explicitDevice = MTLCreateSystemDefaultDevice();
        if (explicitDevice) {
            NSLog(@"Explicitly created Metal device: %@", explicitDevice.name);
        } else {
            NSLog(@"Failed to explicitly create Metal device");
        }

        if (device) {
            id<MTLCommandQueue> commandQueue = [device newCommandQueue];
            if (commandQueue) {
                NSLog(@"Successfully created command queue");
            } else {
                NSLog(@"Failed to create command queue");
            }
        }

        id<MTLDevice> customDevice = createCustomMTLDevice();
        if (customDevice) {
            NSLog(@"Successfully created custom Metal device: %@", customDevice.name);
        } else {
            NSLog(@"Failed to create custom Metal device");
        }

        dlclose(metalHandle);
    }
    return 0;
}

export CC=/usr/bin/clang
export OBJC=/usr/bin/clang
export CC_FOR_BUILD=/usr/bin/clang
export OBJC_FOR_BUILD=/usr/bin/clang

/usr/bin/clang -I. -Icommon -D_XOPEN_SOURCE=600 -D_DARWIN_C_SOURCE -DNDEBUG -DGGML_USE_ACCELERATE -DGGML_USE_BLAS -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -DGGML_USE_LLAMAFILE -DGGML_USE_METAL -D_FORTIFY_SOURCE=2 -isystem /Users/davidlaxer/anaconda3/envs/AI-Feynman/include -std=c11 -fPIC -O3 -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -pthread -march=native -mtune=native -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE -fstack-protector-strong -O2 -pipe -isystem /Users/davidlaxer/anaconda3/envs/AI-Feynman/include -c test.m -o test.o

/usr/bin/clang++ test.o -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework CoreGraphics -Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs -Wl,-rpath,/Users/davidlaxer/anaconda3/envs/AI-Feynman/lib -L/Users/davidlaxer/anaconda3/envs/AI-Feynman/lib -o test

./test                
2024-08-10 11:36:39.295 test[68671:3834892] Starting Metal framework loading...
2024-08-10 11:36:39.296 test[68671:3834892] Successfully loaded Metal framework
2024-08-10 11:36:39.296 test[68671:3834892] Dummy CGRect: {{0, 0}, {100, 100}}
2024-08-10 11:36:39.299 test[68671:3834892] macOS Version: Version 14.6.1 (Build 23G93)
2024-08-10 11:36:39.299 test[68671:3834892] About to call MTLCreateSystemDefaultDevice()
2024-08-10 11:36:39.307 test[68671:3834892] Failed to create Metal device. Error domain: NSOSStatusErrorDomain, code: 0
2024-08-10 11:36:39.307 test[68671:3834892] Number of available Metal devices: 1
2024-08-10 11:36:39.307 test[68671:3834892]   AMD Radeon Pro 5700 XT (Headless: No, Low Power: No, Removable: No)
2024-08-10 11:36:39.307 test[68671:3834892]     Recommended max working set size: 17163091968
2024-08-10 11:36:39.307 test[68671:3834892]     Max transfer rate: 0
2024-08-10 11:36:39.307 test[68671:3834892] Manually selected device: AMD Radeon Pro 5700 XT
2024-08-10 11:36:39.307 test[68671:3834892] Metal is supported on this OS version
2024-08-10 11:36:39.307 test[68671:3834892] Failed to explicitly create Metal device
2024-08-10 11:36:39.308 test[68671:3834892] Successfully created command queue
2024-08-10 11:36:39.308 test[68671:3834892] Successfully created custom Metal device: AMD Radeon Pro 5700 XT
dbl001 commented 2 months ago

MTLCreateSystemDefaultDevice() was always returning nil. (I'm still not sure why.)

Before:

id<MTLDevice> device = MTLCreateSystemDefaultDevice();

After:

static id<MTLDevice> createCustomMTLDevice(void) {
    NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
    for (id<MTLDevice> dev in devices) {
        if (![dev isLowPower] && ![dev isHeadless]) {
            return dev;
        }
    }
    return nil;
}
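
For completeness, a standalone sketch of the same idea with one extra fallback (my addition, not the code from the diff further down): prefer a discrete, non-headless device, then try the system default, then take whatever device is listed first, so a machine that only exposes an integrated GPU still gets something instead of nil.

#import <Metal/Metal.h>
#import <Foundation/Foundation.h>

// Hypothetical helper: discrete device first, then the system default,
// then the first device Metal reports at all.
static id<MTLDevice> pickMetalDevice(void) {
    NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
    for (id<MTLDevice> dev in devices) {
        if (![dev isLowPower] && ![dev isHeadless]) {
            return dev;
        }
    }
    id<MTLDevice> def = MTLCreateSystemDefaultDevice();
    if (def != nil) {
        return def;
    }
    return devices.firstObject; // still nil only if no Metal device exists
}

int main(void) {
    @autoreleasepool {
        id<MTLDevice> dev = pickMetalDevice();
        NSLog(@"picked device: %@", dev ? dev.name : @"(none)");
    }
    return 0;
}

The fallback matters because createCustomMTLDevice() as written returns nil whenever every visible device is low power or headless, which would reproduce the original allocation failure on such machines.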

Now llama.cpp is running on my AMD GPU:

./llama-cli -m ~/llama2.cpp/models/7B/ggml-model-q4_0-v2.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" --main-gpu 0
warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the main GPU has no effect.
Log start
main: build = 3152 (7b2f4a7d)
main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for x86_64-apple-darwin23.6.0
main: seed  = 1723474209
llama_model_loader: loaded meta data with 16 key-value pairs and 291 tensors from /Users/davidlaxer/llama2.cpp/models/7B/ggml-model-q4_0-v2.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = models
llama_model_loader: - kv   2:                       llama.context_length u32              = 2048
llama_model_loader: - kv   3:                     llama.embedding_length u32              = 4096
llama_model_loader: - kv   4:                          llama.block_count u32              = 32
llama_model_loader: - kv   5:                  llama.feed_forward_length u32              = 11008
llama_model_loader: - kv   6:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   7:                 llama.attention.head_count u32              = 32
llama_model_loader: - kv   8:              llama.attention.head_count_kv u32              = 32
llama_model_loader: - kv   9:     llama.attention.layer_norm_rms_epsilon f32              = 0.000001
llama_model_loader: - kv  10:                          general.file_type u32              = 2
llama_model_loader: - kv  11:                       tokenizer.ggml.model str              = llama
llama_model_loader: - kv  12:                      tokenizer.ggml.tokens arr[str,32000]   = ["<unk>", "<s>", "</s>", "<0x00>", "<...
llama_model_loader: - kv  13:                      tokenizer.ggml.scores arr[f32,32000]   = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  14:                  tokenizer.ggml.token_type arr[i32,32000]   = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
llama_model_loader: - kv  15:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   65 tensors
llama_model_loader: - type q4_0:  225 tensors
llama_model_loader: - type q6_K:    1 tensors
llm_load_vocab: special tokens cache size = 259
llm_load_vocab: token to piece cache size = 0.1684 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = SPM
llm_load_print_meta: n_vocab          = 32000
llm_load_print_meta: n_merges         = 0
llm_load_print_meta: n_ctx_train      = 2048
llm_load_print_meta: n_embd           = 4096
llm_load_print_meta: n_head           = 32
llm_load_print_meta: n_head_kv        = 32
llm_load_print_meta: n_layer          = 32
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 1
llm_load_print_meta: n_embd_k_gqa     = 4096
llm_load_print_meta: n_embd_v_gqa     = 4096
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-06
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 11008
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 2048
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 7B
llm_load_print_meta: model ftype      = Q4_0
llm_load_print_meta: model params     = 6.74 B
llm_load_print_meta: model size       = 3.56 GiB (4.54 BPW) 
llm_load_print_meta: general.name     = models
llm_load_print_meta: BOS token        = 1 '<s>'
llm_load_print_meta: EOS token        = 2 '</s>'
llm_load_print_meta: UNK token        = 0 '<unk>'
llm_load_print_meta: LF token         = 13 '<0x0A>'
llm_load_tensors: ggml ctx size =    0.30 MiB
max_size: 107520000
Successfully created Metal device: AMD Radeon Pro 5700 XT
ggml_backend_metal_log_allocated_size: allocated buffer, size =  3577.56 MiB, ( 3577.56 / 16368.00)
llm_load_tensors: offloading 32 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 33/33 layers to GPU
llm_load_tensors:      Metal buffer size =  3577.56 MiB
llm_load_tensors:        CPU buffer size =    70.31 MiB
..................................................................................................
llama_new_context_with_model: n_ctx      = 2048
llama_new_context_with_model: n_batch    = 2048
llama_new_context_with_model: n_ubatch   = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base  = 10000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: AMD Radeon Pro 5700 XT
ggml_metal_init: picking custom device: AMD Radeon Pro 5700 XT
ggml_metal_init: default.metallib not found, loading from source
ggml_metal_init: GGML_METAL_PATH_RESOURCES = nil
ggml_metal_init: loading '/Users/davidlaxer/llama2.cpp/ggml-metal.metal'
ggml_metal_init: GPU name:   AMD Radeon Pro 5700 XT
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3  (5001)
ggml_metal_init: simdgroup reduction support   = true
ggml_metal_init: simdgroup matrix mul. support = false
ggml_metal_init: hasUnifiedMemory              = false
ggml_metal_init: recommendedMaxWorkingSetSize  = 17163.09 MB
ggml_metal_init: skipping kernel_mul_mm_f32_f32                    (not supported)
ggml_metal_init: skipping kernel_mul_mm_f16_f32                    (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_1_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_1_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q8_0_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q2_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q3_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q4_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q5_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_q6_K_f32                   (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_xxs_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_xs_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq3_xxs_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq3_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq2_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq1_s_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq1_m_f32                  (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq4_nl_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_iq4_xs_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_f32_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_f16_f32                 (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_1_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_1_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q8_0_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q2_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q3_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q4_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q5_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_q6_K_f32                (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_xxs_f32             (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_xs_f32              (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq3_xxs_f32             (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq3_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq2_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq1_s_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq1_m_f32               (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq4_nl_f32              (not supported)
ggml_metal_init: skipping kernel_mul_mm_id_iq4_xs_f32              (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h64            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h80            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h96            (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h112           (not supported)
ggml_metal_init: skipping kernel_flash_attn_ext_f16_h128           (not supported)
llama_kv_cache_init:      Metal KV buffer size =  1024.00 MiB
llama_new_context_with_model: KV self size  = 1024.00 MiB, K (f16):  512.00 MiB, V (f16):  512.00 MiB
llama_new_context_with_model:        CPU  output buffer size =     0.12 MiB
llama_new_context_with_model:      Metal compute buffer size =   164.00 MiB
llama_new_context_with_model:        CPU compute buffer size =    12.01 MiB
llama_new_context_with_model: graph nodes  = 1030
llama_new_context_with_model: graph splits = 2

system_info: n_threads = 8 / 16 | AVX = 0 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 0 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | 
main: interactive mode on.
Reverse prompt: 'User:'
sampling: 
    repeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000
    top_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800
    mirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000
sampling order: 
CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature 
generate: n_ctx = 2048, n_batch = 2048, n_predict = 256, n_keep = 1

== Running in interactive mode. ==
 - Press Ctrl+C to interject at any time.
 - Press Return to return control to the AI.
 - To return control without starting a new line, end your input with '/'.
 - If you want to submit another line, end your input with '\'.

▶︎“…one of the most promising composers of his generation.” ★★★★ The Guardian
▶︎“…a composer of rare gifts.” ★★★★ The Daily Telegraph
▶︎“…a talent that deserves to be heard.” ★★★★★ The Times
▶︎“…the most original and compelling young composer in Britain today.” ★★★★★ The Independent
▶︎“…a composer of real merit.” ★★★★ The Scotsman
▶︎“…a composer of real talent.” ★★★★★ The Express
▶︎“…the most exciting British composer of his generation.” ★★★★★ The Jewish Chronicle
▶︎“…a young man to watch.” ★★★★★ The Jewish News of Greater Phoenix
▶︎“…a composer of exceptional talent.” ★★★★★ Jewish Chronicle
▶︎“…one of the most exciting talents of his generation.” ★★★★★ Jewish News of Greater Phoenix
▶︎“His music is as refreshing as it is original.”
▶︎“He is a

llama_print_timings:        load time =       0.00 ms
llama_print_timings:      sample time =       0.00 ms /   283 runs   (    0.00 ms per token,      inf tokens per second)
llama_print_timings: prompt eval time =       0.00 ms /     7 tokens (    0.00 ms per token,      inf tokens per second)
llama_print_timings:        eval time =       0.00 ms /   282 runs   (    0.00 ms per token,      inf tokens per second)
llama_print_timings:       total time =       0.00 ms /   289 tokens

Not sure why the timings are all zero.

Differences:

% git diff ggml-metal.m
diff --git a/ggml-metal.m b/ggml-metal.m
index f894274c..1772b293 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -237,6 +237,18 @@ struct ggml_metal_context {
 @implementation GGMLMetalClass
 @end

+static id<MTLDevice> createCustomMTLDevice(void);
+
+static id<MTLDevice> createCustomMTLDevice(void) {
+    NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
+    for (id<MTLDevice> dev in devices) {
+        if (![dev isLowPower] && ![dev isHeadless]) {
+            return dev;
+        }
+    }
+    return nil;
+}
+
 static void ggml_metal_default_log_callback(enum ggml_log_level level, const char * msg, void * user_data) {
     fprintf(stderr, "%s", msg);

@@ -302,8 +314,8 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) {
 #endif

     // Pick and show default Metal device
-    id<MTLDevice> device = MTLCreateSystemDefaultDevice();
-    GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);
+    id<MTLDevice> device = createCustomMTLDevice();
+    GGML_METAL_LOG_INFO("%s: picking custom device: %s\n", __func__, [[device name] UTF8String]);

     // Configure context
     struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
@@ -2865,13 +2877,34 @@ static enum ggml_status ggml_metal_graph_compute(
 static id<MTLDevice> g_backend_device = nil;
 static int g_backend_device_ref_count = 0;

+
 static id<MTLDevice> ggml_backend_metal_get_device(void) {
     if (g_backend_device == nil) {
-        g_backend_device = MTLCreateSystemDefaultDevice();
-    }
+        g_backend_device = createCustomMTLDevice();
+        if (g_backend_device == nil) {
+            fprintf(stderr, "Error: createCustomMTLDevice() returned nil\n");

-    g_backend_device_ref_count++;
+            // Check if Metal is supported
+            if (@available(macOS 10.11, *)) {
+                fprintf(stderr, "Metal framework is available\n");
+            } else {
+                fprintf(stderr, "Metal framework is not available on this system\n");
+            }

+            // List available devices
+            NSArray<id<MTLDevice>> *devices = MTLCopyAllDevices();
+            fprintf(stderr, "Available Metal devices:\n");
+            for (id<MTLDevice> device in devices) {
+                fprintf(stderr, "  %s\n", device.name.UTF8String);
+            }
+
+            // Additional system info
+            fprintf(stderr, "macOS Version: %s\n", [[[NSProcessInfo processInfo] operatingSystemVersionString] UTF8String]);
+        } else {
+            fprintf(stderr, "Successfully created Metal device: %s\n", g_backend_device.name.UTF8String);
+        }
+    }
+    g_backend_device_ref_count++;
     return g_backend_device;
 }

@@ -3067,6 +3100,7 @@ GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) {
     return &ggml_backend_buffer_type_metal;
 }

+
 // buffer from ptr

 GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size) {
github-actions[bot] commented 1 month ago

This issue was closed because it has been inactive for 14 days since being marked as stale.