Closed — atantos closed this issue 1 year ago
When trying to download the Hugging Face model with `const model = hgf"gpt2:lmheadmodel"` from the text generation example, I get the following error. As a result, I cannot run the rest of the example file that follows.
const model = hgf"gpt2:lmheadmodel"
Thanks for your work!
Alex
Downloading gpt2/pytorch_model.bin [>] 92.7 ┌ Warning: load from base: prediction layer not found in state: initialized. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:103 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. └ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 ┌ Warning: HGFGPT2Attention doesn't have field bias. 
└ @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/models.jl:49 Error showing value of type Transformers.HuggingFace.HGFGPT2LMHeadModel{Transformers.HuggingFace.HGFGPT2Model{12, Transformers.HuggingFace.FakeTHModuleList{12, NTuple{12, Transformers.HuggingFace.HGFGPT2Block{Transformers.HuggingFace.HGFGPT2Attention{Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.HGFGPT2MLP{typeof(gelu), Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}}}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}}, Transformers.HuggingFace.FakeTHLinear{LinearAlgebra.Transpose{Float32, Matrix{Float32}}, Nothing}}: ERROR: MethodError: no method matching print_tree(::typeof(Transformers.HuggingFace._printnode), ::IOContext{Base.TTY}, ::Transformers.HuggingFace.HGFGPT2LMHeadModel{Transformers.HuggingFace.HGFGPT2Model{12, Transformers.HuggingFace.FakeTHModuleList{12, NTuple{12, Transformers.HuggingFace.HGFGPT2Block{Transformers.HuggingFace.HGFGPT2Attention{Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.HGFGPT2MLP{typeof(gelu), Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}}}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, 
Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}}, Transformers.HuggingFace.FakeTHLinear{LinearAlgebra.Transpose{Float32, Matrix{Float32}}, Nothing}}, ::Int64) Closest candidates are: print_tree(::Function, ::Function, ::IO, ::Any; maxdepth, indicate_truncation, charset, printkeys, depth, prefix, kw...) @ AbstractTrees ~/.julia/packages/AbstractTrees/EUx8s/src/printing.jl:190 print_tree(::IO, ::Any; kw...) @ AbstractTrees ~/.julia/packages/AbstractTrees/EUx8s/src/printing.jl:272 print_tree(::Any; kw...) @ AbstractTrees ~/.julia/packages/AbstractTrees/EUx8s/src/printing.jl:273 Stacktrace: [1] show(io::IOContext{Base.TTY}, x::Transformers.HuggingFace.HGFGPT2LMHeadModel{Transformers.HuggingFace.HGFGPT2Model{12, Transformers.HuggingFace.FakeTHModuleList{12, NTuple{12, Transformers.HuggingFace.HGFGPT2Block{Transformers.HuggingFace.HGFGPT2Attention{Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.HGFGPT2MLP{typeof(gelu), Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}}}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}}, Transformers.HuggingFace.FakeTHLinear{LinearAlgebra.Transpose{Float32, Matrix{Float32}}, Nothing}}; depth::Int64) @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/base.jl:144 [2] show(io::IOContext{Base.TTY}, x::Transformers.HuggingFace.HGFGPT2LMHeadModel{Transformers.HuggingFace.HGFGPT2Model{12, Transformers.HuggingFace.FakeTHModuleList{12, NTuple{12, 
Transformers.HuggingFace.HGFGPT2Block{Transformers.HuggingFace.HGFGPT2Attention{Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.HGFGPT2MLP{typeof(gelu), Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}}}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}}, Transformers.HuggingFace.FakeTHLinear{LinearAlgebra.Transpose{Float32, Matrix{Float32}}, Nothing}}) @ Transformers.HuggingFace ~/.julia/packages/Transformers/A1N7i/src/huggingface/models/base.jl:143 [3] show(io::IOContext{Base.TTY}, #unused#::MIME{Symbol("text/plain")}, x::Transformers.HuggingFace.HGFGPT2LMHeadModel{Transformers.HuggingFace.HGFGPT2Model{12, Transformers.HuggingFace.FakeTHModuleList{12, NTuple{12, Transformers.HuggingFace.HGFGPT2Block{Transformers.HuggingFace.HGFGPT2Attention{Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.HGFGPT2MLP{typeof(gelu), Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}}}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}}, Transformers.HuggingFace.FakeTHLinear{LinearAlgebra.Transpose{Float32, Matrix{Float32}}, Nothing}}) @ Base.Multimedia 
./multimedia.jl:47 [4] display(d::REPL.REPLDisplay{REPL.LineEditREPL}, mime::MIME{Symbol("text/plain")}, x::Transformers.HuggingFace.HGFGPT2LMHeadModel{Transformers.HuggingFace.HGFGPT2Model{12, Transformers.HuggingFace.FakeTHModuleList{12, NTuple{12, Transformers.HuggingFace.HGFGPT2Block{Transformers.HuggingFace.HGFGPT2Attention{Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}, Transformers.HuggingFace.HGFGPT2MLP{typeof(gelu), Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}, Transformers.HuggingFace.FakeHGFConv1D{Matrix{Float32}, Vector{Float32}}}}}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHEmbedding{Matrix{Float32}}, Transformers.HuggingFace.FakeTHLayerNorm{Vector{Float32}}}, Transformers.HuggingFace.FakeTHLinear{LinearAlgebra.Transpose{Float32, Matrix{Float32}}, Nothing}}) @ OhMyREPL ~/.julia/packages/OhMyREPL/oDZvT/src/output_prompt_overwrite.jl:8 [5] display(d::REPL.REPLDisplay, x::Any) @ REPL /Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/REPL.jl:281 [6] display(x::Any) @ Base.Multimedia ./multimedia.jl:340 [7] #invokelatest#2 @ ./essentials.jl:816 [inlined] [8] invokelatest @ ./essentials.jl:813 [inlined] [9] print_response(errio::IO, response::Any, show_value::Bool, have_color::Bool, specialdisplay::Union{Nothing, AbstractDisplay}) @ REPL /Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/REPL.jl:305 [10] (::REPL.var"#57#58"{REPL.LineEditREPL, Pair{Any, Bool}, Bool, Bool})(io::Any) @ REPL /Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/REPL.jl:287 [11] with_repl_linfo(f::Any, repl::REPL.LineEditREPL) @ REPL 
/Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/REPL.jl:557 [12] print_response(repl::REPL.AbstractREPL, response::Any, show_value::Bool, have_color::Bool) @ REPL /Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/REPL.jl:285 [13] (::REPL.var"#do_respond#80"{Bool, Bool, REPL.var"#93#103"{REPL.LineEditREPL, REPL.REPLHistoryProvider}, REPL.LineEditREPL, REPL.LineEdit.Prompt})(s::REPL.LineEdit.MIState, buf::Any, ok::Bool) @ REPL /Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/REPL.jl:899 [14] (::VSCodeServer.var"#98#101"{REPL.var"#do_respond#80"{Bool, Bool, REPL.var"#93#103"{REPL.LineEditREPL, REPL.REPLHistoryProvider}, REPL.LineEditREPL, REPL.LineEdit.Prompt}})(mi::REPL.LineEdit.MIState, buf::IOBuffer, ok::Bool) @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.47.2/scripts/packages/VSCodeServer/src/repl.jl:122 [15] (::OhMyREPL.Prompt.var"#24#51")(s::Any, data::Any, c::Any) @ OhMyREPL.Prompt ~/.julia/packages/OhMyREPL/oDZvT/src/repl.jl:237 [16] #invokelatest#2 @ ./essentials.jl:816 [inlined] [17] invokelatest @ ./essentials.jl:813 [inlined] [18] (::REPL.LineEdit.var"#27#28"{OhMyREPL.Prompt.var"#24#51", String})(s::Any, p::Any) @ REPL.LineEdit /Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/LineEdit.jl:1603 [19] prompt!(term::REPL.Terminals.TextTerminal, prompt::REPL.LineEdit.ModalInterface, s::REPL.LineEdit.MIState) @ REPL.LineEdit /Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/LineEdit.jl:2740 [20] run_interface(terminal::REPL.Terminals.TextTerminal, m::REPL.LineEdit.ModalInterface, s::REPL.LineEdit.MIState) @ REPL.LineEdit /Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/LineEdit.jl:2642 [21] run_frontend(repl::REPL.LineEditREPL, backend::REPL.REPLBackendRef) @ REPL 
/Applications/Julia-1.9.app/Contents/Resources/julia/share/julia/stdlib/v1.9/REPL/src/REPL.jl:1300 [22] (::REPL.var"#62#68"{REPL.LineEditREPL, REPL.REPLBackendRef})() @ REPL ./task.jl:514
I downgraded AbstractTrees.jl to 0.3 and the issue was resolved.
When trying to download the huggingface model with
const model = hgf"gpt2:lmheadmodel"
of the text generation example, I get the following error. As a result, I cannot run the rest of the example file that follows. Thanks for your work!
Alex