explosion / spaCy

💫 Industrial-strength Natural Language Processing (NLP) in Python
https://spacy.io
MIT License

Not getting the folder structure inside `/model-best/transformer/model` while training Custom NER model #10290

Closed Chetan8000 closed 2 years ago

Chetan8000 commented 2 years ago

I'm not getting the expected folder structure while training a custom NER model: `/model-best/transformer/model` is now a single file instead of a folder.

When I trained previously, I did get the folder structure at that location (`/model-best/transformer/model`). I'm using the same config.cfg and the same configuration now, but I only get a single model file.

Here is the folder structure I used to get:

/model-best
├── ner
├── transformer
│   ├── model
│   │   ├── config.json
│   │   ├── pytorch_model.bin
│   │   ├── special_tokens_map.json
│   │   ├── tokenizer.json
│   │   ├── tokenizer_config.json
│   │   └── vocab.json
│   └── config
├── vocab
├── meta.json
└── tokenizer

And this is what I get now:

/model-best
├── ner
├── transformer
│   ├── model          <- single file, not a folder
│   └── config
├── vocab
├── meta.json
└── tokenizer
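
The difference can also be checked programmatically (a minimal sketch, assuming the output directory is named model-best as above):

```python
from pathlib import Path

# Check how the transformer weights were written out.
# "model-best" is assumed to be the training output directory.
model_path = Path("model-best/transformer/model")
print("is directory:", model_path.is_dir())  # True for the layout I used to get
print("is file:", model_path.is_file())      # True for the layout I get now
```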

Here is my config.cfg:

[paths]
train = "/content/drive/MyDrive/Flue_Model/corpus/train_df_bio_150_doc.spacy"
dev = "/content/drive/MyDrive/Flue_Model/corpus/valid_df_bio_100_doc.spacy"
vectors = null
init_tok2vec = null

[system]
gpu_allocator = "pytorch"
seed = 0

[nlp]
lang = "en"
pipeline = ["transformer","ner"]
batch_size = 128
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}

[components]

[components.ner]
factory = "ner"
incorrect_spans_key = null
moves = null
update_with_oracle_cut_size = 100

[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "ner"
extra_state_tokens = false
hidden_width = 64
maxout_pieces = 2
use_upper = false
nO = null

[components.ner.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"

[components.transformer]
factory = "transformer"
max_batch_items = 1024
set_extra_annotations = {"@annotation_setters":"spacy-transformers.null_annotation_setter.v1"}

[components.transformer.model]
@architectures = "spacy-transformers.TransformerModel.v1"
name = "studio-ousia/luke-base"

[components.transformer.model.get_spans]
@span_getters = "spacy-transformers.strided_spans.v1"
window = 128
stride = 96

[components.transformer.model.tokenizer_config]
use_fast = true

[corpora]

[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null

[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null

[training]
accumulate_gradient = 10
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.15
patience = 1600
max_epochs = 0
max_steps = 130000
eval_frequency = 200
frozen_components = []
annotating_components = []
before_to_disk = null

[training.batcher]
@batchers = "spacy.batch_by_words.v1"
discard_oversize = false
tolerance = 0.2
get_length = null

[training.batcher.size]
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001
t = 0.0

[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = false

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001

[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.00005

[training.score_weights]
ents_f = 1.0
ents_p = 0.0
ents_r = 0.0
ents_per_type = null

[pretraining]

[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null
before_init = null
after_init = null

[initialize.components]

[initialize.tokenizer]
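
For reference, this config is run with the standard spaCy CLI (a minimal sketch; the output path and GPU id here are placeholders, not my exact command):

```
python -m spacy train config.cfg --output ./output --gpu-id 0
```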


adrianeboyd commented 2 years ago

The serialization format changed in spacy-transformers v1.1. The single model file contains all the same data as the directory did in v1.0. Some additional details here: https://github.com/explosion/spacy-transformers/releases/tag/v1.1.0
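
Regardless of whether the weights are stored as a directory (v1.0) or a single file (v1.1), the trained pipeline loads the same way. A minimal sketch, assuming the output directory is model-best:

```python
import spacy

# Loading is unaffected by the serialization change: spaCy reads
# transformer/model whether it is a directory or a single file.
nlp = spacy.load("model-best")
doc = nlp("Some example text to run the custom NER model on.")
print([(ent.text, ent.label_) for ent in doc.ents])
```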

adrianeboyd commented 2 years ago

This is not a bug, so let me convert it to a discussion...