huggingface / huggingface_sb3

Additional code for Stable-baselines3 to load and upload models from the Hub.
77 stars 23 forks source link

400 Client Error for `package_to_hub` function #18

Closed LoryPack closed 2 years ago

LoryPack commented 2 years ago

I am going through the notebook of Unit 1 of the deep RL course. However, I cannot run the package_to_hub function, which gives the following error:

HTTPError                                 Traceback (most recent call last)

[<ipython-input-26-97f48e41190b>](https://localhost:8080/#) in <module>
     25                eval_env=eval_env,
     26                repo_id="LorenzoPacchiardi/ppo-LunarLander-v2",
---> 27                commit_message="Upload PPO LunarLander-v2 trained agent (50 steps)")

6 frames

[/usr/local/lib/python3.7/dist-packages/requests/models.py](https://localhost:8080/#) in raise_for_status(self)
    939 
    940         if http_error_msg:
--> 941             raise HTTPError(http_error_msg, response=self)
    942 
    943     def close(self):

HTTPError: 400 Client Error: Bad Request for url: https://huggingface.co/api/models/LorenzoPacchiardi/ppo-LunarLander-v2/commit/main (Request ID: fhQtAuS_qa8bj_c6AI0v5)

I get a similar error with push_to_hub

I logged in to huggingface correctly with the token, and the load_from_hub function works fine.

hezzze commented 2 years ago

got the same issue

hezzze commented 2 years ago

It looks like package_to_hub is no longer working; I'm not sure what the underlying issue is. However, I've referenced the code in Unit 2 to come up with a workaround for you: it generates the model and video locally and then uploads them to HF. Worth a try.

In Unit 1 colab add the following:

from huggingface_hub import HfApi, HfFolder, Repository
from huggingface_hub.repocard import metadata_eval_result, metadata_save

from pathlib import Path
import datetime
import json
import random
import imageio
import numpy as np
import pickle5 as pickle

from huggingface_sb3.push_to_hub import _add_logdir, _save_model_card, unwrap_vec_normalize, is_atari, _generate_config, _evaluate_agent, _generate_replay, _generate_model_card  

def _push_to_hub(
    repo_id,
    model,
    model_architecture,
    model_name,
    env,
    env_id,
    video_fps=1,
    local_repo_path="hub",
    commit_message="Push to Hub",
    token=None,
    n_eval_episodes=10,
    video_length=1000,
    logs=None,
    is_deterministic: bool = True,
):
    """Evaluate, record and push a Stable-Baselines3 agent to the Hugging Face Hub.

    Saves the model (and VecNormalize statistics if present), generates a
    config file, evaluation results, a replay video and a model card into a
    local clone of the Hub repo, then pushes everything in one commit.

    Args:
        repo_id: Hub repo id in the form "<namespace>/<repo_name>".
        model: Trained Stable-Baselines3 model to save and push.
        model_architecture: Architecture name (e.g. "PPO") for the model card.
        model_name: Filename (without extension) used when saving the model.
        env: (Vec)environment used for both evaluation and video recording.
        env_id: Gym environment id (e.g. "LunarLander-v2").
        video_fps: Kept for interface compatibility; not used in this body —
            presumably the replay helper picks its own fps. TODO confirm.
        local_repo_path: Local directory under which the repo is cloned.
        commit_message: Commit message for the final push.
        token: Hub auth token; None falls back to the locally stored one.
        n_eval_episodes: Number of evaluation episodes.
        video_length: Number of timesteps recorded in the replay video.
        logs: Optional path to a log directory to upload alongside the model.
        is_deterministic: Use deterministic actions during evaluation and
            replay (automatically disabled for Atari environments).
    """
    _, repo_name = repo_id.split("/")

    eval_env = env

    # Step 1: Create the repo on the Hub (no-op if it already exists),
    # then clone it locally and pull the latest content.
    api = HfApi()
    repo_url = api.create_repo(
        repo_id=repo_id,
        token=token,
        private=False,
        exist_ok=True,
    )

    repo_local_path = Path(local_repo_path) / repo_name
    # NOTE(review): use_auth_token=True relies on the locally stored
    # credentials rather than the explicit `token` argument — confirm.
    repo = Repository(repo_local_path, clone_from=repo_url, use_auth_token=True)
    repo.git_pull()

    # BUG FIX: the original used `with repo_local_path as tmpdirname:`.
    # pathlib.Path's context-manager support was only ever a deprecated
    # no-op (it just returned self) and was removed in Python 3.13, so that
    # line raises on modern interpreters. A plain assignment is what was
    # actually intended.
    tmpdirname = Path(repo_local_path)

    # Step 2: Save the model into the local repo.
    model.save(tmpdirname / model_name)

    # Retrieve the VecNormalize wrapper if it exists:
    # we need to save its statistics alongside the model.
    maybe_vec_normalize = unwrap_vec_normalize(eval_env)
    if maybe_vec_normalize is not None:
        maybe_vec_normalize.save(tmpdirname / "vec_normalize.pkl")
        # Do not update the stats at test time
        maybe_vec_normalize.training = False
        # Reward normalization is not needed at test time
        maybe_vec_normalize.norm_reward = False

    # Two handles on the environment: one for video generation and one for
    # evaluation (they alias the same underlying env here).
    replay_env = eval_env

    # Deterministic by default (except for Atari, where stochastic
    # evaluation is the convention).
    if is_deterministic:
        is_deterministic = not is_atari(env_id)

    # Step 3: Create a config file describing the saved model.
    _generate_config(model_name, tmpdirname)

    # Step 4: Evaluate the agent and store the results.
    mean_reward, std_reward = _evaluate_agent(
        model, eval_env, n_eval_episodes, is_deterministic, tmpdirname
    )

    # Step 5: Generate a replay video of the agent.
    _generate_replay(model, replay_env, video_length, is_deterministic, tmpdirname)

    # Step 6: Generate and save the model card (README + metadata).
    generated_model_card, metadata = _generate_model_card(
        model_architecture, env_id, mean_reward, std_reward
    )
    _save_model_card(tmpdirname, generated_model_card, metadata)

    # Step 7: Add logs if needed.
    if logs:
        _add_logdir(tmpdirname, Path(logs))

    # Step 8: Push everything to the Hub.
    print(f"Pushing repo {repo_name} to the Hugging Face Hub")
    repo.push_to_hub(commit_message=commit_message)

    print(f"Your model is pushed to the hub. You can view your model here: {repo_url}")

then

# Example invocation. Assumes `model`, `env_id`, `model_architecture`,
# `model_name` and `eval_env` are already defined earlier in the notebook;
# fill in `repo_id` with your own "<namespace>/<repo_name>" before running.
_push_to_hub(
    repo_id="", # your repo id, e.g. "username/ppo-LunarLander-v2"
    model=model,
    env_id=env_id,
    model_architecture=model_architecture,
    model_name=model_name,
    env=eval_env,
    commit_message="Upload PPO LunarLander-v2 trained agent"
 )

it's working for me, good luck

simoninithomas commented 2 years ago

Hey there 👋 . Thanks for pointing this out 🤗 — we are investigating this error and I will keep you updated.

simoninithomas commented 2 years ago

Hey there 👋 , we updated the Hub library and the problem is solved 🤗 thanks for pointing this out.

LoryPack commented 2 years ago

Thanks a lot, it works for me now!