Open ThiinhUET opened 1 week ago

Hello, can you please upload a config.yaml file? I am trying to run the training code, but the config file is missing. Thank you so much!

Hi @ThiinhUET,
The configurations are generated from the queso.configs.Configuration
dataclass. You can initialize this dataclass with the desired hyperparameters and save it to a YAML file, which the training scripts then read. Here's an example snippet:

import os
from math import pi

import numpy as np
from dotenv import load_dotenv

from queso.io import IO
from queso.configs import Configuration
from queso.train.vqs import vqs  # training entry point; adjust this import to wherever vqs lives in your queso version

# base path for saved data; here read from a .env file / DATA_PATH variable (adjust to your setup)
load_dotenv()
data_path = os.getenv("DATA_PATH", "data")
# ansatz, system size, and loss function for this run
ansatz = "hardware_efficient_ansatz"
n = 4
loss_fi = "loss_cfi"
print(n, ansatz)

config = Configuration()
config.preparation = ansatz
config.n = n
config.k = n
prefix = f"{config.preparation}"
folder = f"vqs-example-data/n{config.n}_{loss_fi}"

# which stages of the pipeline to run
config.train_circuit = False
config.sample_circuit_training_data = False
config.sample_circuit_testing_data = True
config.train_nn = False
config.benchmark_estimator = True
config.n_grid = 250
config.seed = 744
config.interaction = 'local_rz'
config.detection = 'local_r'
config.loss_fi = loss_fi
config.lr_circ = 0.5e-3
config.n_shots = 1000
config.n_shots_test = 10000
config.n_phis = 250
# training phase range and test phases, both centred at pi/(2n)
config.phi_center = pi/2/n
config.phi_range = [-pi/2/n + config.phi_center, pi/2/n + config.phi_center]
config.phis_test = np.linspace(-pi/3/n + config.phi_center, pi/3/n + config.phi_center, 5).tolist()
config.n_sequences = np.logspace(0, 3, 10, dtype='int').tolist()
config.n_epochs = 3000
config.lr_nn = 1.0e-3
config.l2_regularization = 0.01
# config.n_grid = 500
config.nn_dims = [64, 64, 64]
config.batch_size = 1000

# save the configuration to YAML inside the IO folder and run training
io = IO(path=data_path, folder=folder)
io.save_yaml(config, 'config.yaml')
vqs(io, config)
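
The snippet above writes config.yaml itself via io.save_yaml, so there is no file to upload separately: any Configuration you build and save this way will work. If you prefer to generate or tweak config.yaml by hand and then load it back, here is a minimal sketch of that round trip. It assumes the saved file is a flat YAML mapping of the Configuration fields, that the dataclass accepts them as keyword arguments, and that the data path comes from a DATA_PATH environment variable as in the snippet above; if your queso version ships its own YAML loader for Configuration, use that instead.

import os
import yaml
from queso.configs import Configuration

# location of the file saved by the snippet above: <data_path>/<folder>/config.yaml
path = os.path.join(os.getenv("DATA_PATH", "data"), "vqs-example-data", "n4_loss_cfi", "config.yaml")

# read the flat key/value mapping and rebuild the dataclass from it
with open(path) as f:
    fields = yaml.safe_load(f)

config = Configuration(**fields)
print(config.n, config.preparation, config.loss_fi)

From there you can pass the rebuilt config to vqs(io, config) exactly as in the snippet above.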