rfeinman / pytorch-minimize

Newton and Quasi-Newton optimization with PyTorch
https://pytorch-minimize.readthedocs.io
MIT License
308 stars 34 forks source link

Implementing bounds on parameters in minimize_constr #13

Closed richinex closed 2 years ago

richinex commented 2 years ago

I would like to ask if there is a way to implement bounds in the parameters when minimize_constr. I tried to follow the example as implemented in the adversarial example but I didnt quite understand. Here is my MWE. Thank you

import torch
from torchmin import minimize
from torchmin import minimize_constr

# Define function
def rrpwrcwo(p, x):
    Rs = p[0]
    R1 = p[1]
    Q1 = p[2]
    n1 = p[3]
    Wct = p[4]
    Rct = p[5]
    Cdl = p[6]
    Wo = p[7]
    tau = p[8]
    Zw = Wct/torch.sqrt(1j*2*torch.pi*x)
    Zwo = Wo/torch.sqrt(1j*2*torch.pi*x*tau) * 1/(torch.tanh(torch.sqrt(1j*2*torch.pi*x*tau)))
    Z = Rs + 1/((1j*2*torch.pi*x)**n1*Q1+1/(R1+Zw)) + 1/(1j*2*torch.pi*x*Cdl+1/(Rct+Zwo))
    return torch.concat((Z.real, Z.imag),dim = 0)

# Define objective function
def obj_fun(p, x, y, yerr):
    ndata = len(x)
    dof = (2*ndata-(len(p)))
    y_concat = torch.concat([y.real, y.imag], dim = 0)
    sigma = torch.concat([yerr,yerr], dim = 0)
    y_model = rrpwrcwo(p, x)
    chi_sqr = (1/dof)*(torch.sum(torch.abs((1/sigma**2) * (y_concat - y_model)**2)))
    return (chi_sqr)

# Define optimizer
def cnls(p, x, y, yerr, lb, ub):
    """
uses the minimize_constr algorithm
    """
    res = minimize_constr(
        lambda p0: obj_fun(p0, x, y, yerr), p,
        constr=dict(lb=lb, ub=ub)
        )
    return res

# Define data
F       = torch.tensor([1.0000e+05, 7.9433e+04, 6.3096e+04, 5.0119e+04, 3.9811e+04, 3.1623e+04,
        2.5119e+04, 1.9953e+04, 1.5849e+04, 1.2589e+04, 1.0000e+04, 7.9433e+03,
        6.3096e+03, 5.0119e+03, 3.9811e+03, 3.1623e+03, 2.5119e+03, 1.9953e+03,
        1.5849e+03, 1.2589e+03, 1.0000e+03, 7.9433e+02, 6.3096e+02, 5.0119e+02,
        3.9811e+02, 3.1623e+02, 2.5119e+02, 1.9953e+02, 1.5849e+02, 1.2589e+02,
        1.0000e+02, 7.9433e+01, 6.3096e+01, 5.0119e+01, 3.9811e+01, 3.1623e+01,
        2.5119e+01, 1.9953e+01, 1.5849e+01, 1.2589e+01, 1.0000e+01, 7.9433e+00,
        6.3096e+00, 5.0119e+00, 3.9811e+00, 3.1623e+00, 2.5119e+00, 1.9953e+00,
        1.5849e+00, 1.2589e+00, 1.0000e+00, 7.9433e-01, 6.3096e-01, 5.0119e-01,
        3.9811e-01, 3.1623e-01, 2.5119e-01, 1.9953e-01, 1.5849e-01, 1.2589e-01,
        1.0000e-01, 7.9433e-02, 6.3096e-02, 5.0119e-02, 3.9811e-02, 3.1623e-02,
        2.5119e-02, 1.9953e-02, 1.5849e-02, 1.2589e-02, 1.0000e-02],
       dtype=torch.float64)  

ydata       = torch.tensor([  48.0622-7.1378j,   48.7805-8.6535j,   49.5610-10.2042j,
          50.1160-11.9013j,   51.1492-14.2998j,   52.3406-17.1319j,
          54.0447-20.1065j,   56.0564-23.7858j,   58.2409-27.8527j,
          61.7126-32.8414j,   65.5583-38.8114j,   69.6897-44.9863j,
          75.7099-51.8982j,   82.6758-59.9616j,   91.1126-68.3999j,
         102.0122-77.8601j,  115.2241-86.9775j,  129.4085-96.0057j,
         147.0336-103.6792j,  165.8611-109.9053j,  186.5483-114.6322j,
         208.2319-116.6439j,  230.4633-116.2142j,  251.1077-114.5065j,
         271.3128-107.2696j,  289.6310-100.5185j,  304.4211-92.9148j,
         318.1058-84.6961j,  330.8532-77.4144j,  338.9193-69.2815j,
         346.1118-61.9762j,  354.2415-57.4286j,  358.7874-52.0874j,
         363.2482-50.2537j,  366.1636-47.9450j,  368.6635-45.9742j,
         371.3439-49.3821j,  374.3211-52.5983j,  376.4520-58.2518j,
         380.8174-67.8195j,  380.5176-78.4283j,  381.6630-93.4095j,
         385.3211-112.4897j,  388.1656-138.5917j,  393.6918-171.1272j,
         402.3645-212.0358j,  413.7263-261.9386j,  431.7024-325.5902j,
         455.0122-402.3275j,  495.0891-493.1883j,  555.1758-602.9261j,
         642.5894-723.7816j,  764.1397-859.6118j,  927.3895-984.3333j,
        1140.1196-1092.5574j, 1373.8181-1153.8724j, 1634.0118-1166.1475j,
        1870.4850-1130.0780j, 2081.0996-1050.1113j, 2253.4805-959.1526j,
        2380.8914-851.2341j, 2468.9602-768.0323j, 2533.3179-714.4904j,
        2572.3921-682.3876j, 2605.5945-674.2517j, 2629.2695-698.3192j,
        2643.5898-756.9058j, 2664.7559-853.3770j, 2682.2969-982.0924j,
        2679.1318-1183.3368j, 2703.1289-1427.6873j])

sigma       = torch.tensor([  48.5893,   49.5421,   50.6006,   51.5098,   53.1105,   55.0730,
          57.6637,   60.8940,   64.5583,   69.9071,   76.1854,   82.9483,
          91.7900,  102.1307,  113.9300,  128.3304,  144.3665,  161.1324,
         179.9118,  198.9701,  218.9539,  238.6762,  258.1067,  275.9834,
         291.7489,  306.5780,  318.2850,  329.1880,  339.7894,  345.9280,
         351.6169,  358.8663,  362.5487,  366.7079,  369.2892,  371.5190,
         374.6130,  377.9985,  380.9323,  386.8093,  388.5160,  392.9275,
         401.4054,  412.1653,  429.2758,  454.8147,  489.6746,  540.7180,
         607.3742,  698.8190,  819.5975,  967.8744, 1150.1487, 1352.3917,
        1579.0992, 1794.1008, 2007.4597, 2185.3582, 2331.0317, 2449.1116,
        2528.4863, 2585.6602, 2632.1467, 2661.3633, 2691.4194, 2720.4243,
        2749.8135, 2798.0664, 2856.4353, 2928.8279, 3056.9915])

p0 = torch.tensor([20.0, 50.0, 1.0e-5, 0.7, 50.0, 500.0, 1.0e-6, 5.0, 5.0])

# lb = np.zeros((2*order+1))
lb = torch.zeros(len(p0))
lb[3] = (0.499)
ub = torch.full((len(p0),),torch.inf)
ub[3] = (0.999)

res = cnls(p0, F, ydata, sigma, lb, ub)
print(res)

# Error
# Output exceeds the size limit. Open the full output data in a text editor
# ---------------------------------------------------------------------------
# AssertionError                            Traceback (most recent call last)
# /tmp/ipykernel_884495/253454945.py in <module>
#      62 
#      63 
# ---> 64 res = cnls(p0, F, ydata, sigma, lb, ub)
#      65 print(res)

# /tmp/ipykernel_884495/3097738329.py in cnls(p, x, y, yerr, lb, ub)
#       2     """
#       3     """
# ----> 4     res = minimize_constr(
#       5         lambda p0: obj_fun(p0, x, y, yerr), p,
#       6         constr=dict(lb=lb, ub=ub)

# ~/anaconda3/envs/simulation/lib/python3.10/site-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs)
#      25         def decorate_context(*args, **kwargs):
#      26             with self.clone():
# ---> 27                 return func(*args, **kwargs)
#      28         return cast(F, decorate_context)
#      29 

# ~/Dropbox/impedance_fitting_new_cost_function/pytorch-minimize/torchmin/minimize_constr.py in minimize_constr(f, x0, constr, bounds, max_iter, tol, callback, disp, **kwargs)
#     208     # build constraints
#     209     if constr is not None:
# ...
# ---> 42     assert 'fun' in constr
#      43     assert 'lb' in constr or 'ub' in constr
#      44     if 'lb' not in constr:

# AssertionError: 
rfeinman commented 2 years ago

hi @richinex

For bounded minimization you'll want to use the bounds argument rather than constr:

res = minimize_constr(
    lambda p0: obj_fun(p0, x, y, yerr), p,
    bounds=dict(lb=lb, ub=ub)
)

Bounds are also equivalent to a linear constraint:

res = minimize_constr(
    lambda p0: obj_fun(p0, x, y, yerr), p,
    constr=dict(fun=lambda x: x, lb=lb, ub=ub)
)