from nnsight.models.UnifiedTransformer import UnifiedTransformer

# Demo: loading a TransformerLens HookedTransformer through nnsight's
# UnifiedTransformer wrapper, and the main configuration options.

device = "cuda:0"

# Pass in a model name from the TransformerLens library to load a HookedTransformer.
unified_model = UnifiedTransformer("gpt2", device=device)

# Extra kwargs (e.g. fold_ln) are forwarded to from_pretrained to process
# the TransformerLens model.
unified_model = UnifiedTransformer("gpt2", fold_ln=True, device=device)

# Pass processing=False to skip default TransformerLens processing.
# NOTE(review): the original comment said "process=False", but the keyword
# actually passed is `processing` — kept the code and fixed the comment.
unified_model = UnifiedTransformer("gpt2", processing=False, device=device)

# Open a tracing context; intervention code would go inside this block.
with unified_model.invoke("Hello, my name is") as invoker:
    pass

# TransformerLens methods remain available via `.local_model`.
unified_model.local_model.to_str_tokens("Hello, my name is")
# Test snippet.