symforce-org / symforce

Fast symbolic computation, code generation, and nonlinear optimization for robotics
https://symforce.org
Apache License 2.0
1.4k stars 143 forks source link

Optimization result depends on the order of factors #300

Closed mikoff closed 8 months ago

mikoff commented 1 year ago

Describe the bug The optimization result depends on the direction in which the loop closure is added. E.g. we have a sequence of poses: pose0->pose1->pose2. Assume that we know the relative pose of pose2 w.r.t. pose0. We can add the loop closure in two ways:

To Reproduce

import symforce

symforce.set_log_level("warning")
symforce.set_epsilon_to_number(0.000001)

import symforce.symbolic as sf
from symforce.opt.factor import Factor
from symforce.values import Values
from symforce.opt.optimizer import Optimizer

import numpy as np

######################
### define residuals
######################

def delta_pose_residual(
    poseA: sf.Pose2,
    poseB: sf.Pose2,
    deltaMeasured: sf.Pose2,
    epsilon: sf.Scalar,
) -> sf.V3:
    """
    Tangent-space error between the relative pose implied by (poseA, poseB)
    and the measured relative pose.

    Returns a 3-vector that is zero when poseA.inverse() * poseB equals
    deltaMeasured.
    """
    estimated_delta = poseA.inverse() * poseB
    return sf.V3(estimated_delta.local_coordinates(deltaMeasured, epsilon=epsilon))

def prior_pose_residual(
    pose: sf.Pose2,
    posePrior: sf.Pose2,
    epsilon: sf.Scalar,
) -> sf.V3:
    """
    Tangent-space error anchoring `pose` to the prior `posePrior`.

    Returns a 3-vector that is zero when pose == posePrior.
    """
    return sf.V3(pose.local_coordinates(posePrior, epsilon=epsilon))

######################
### generate data
######################
# Ground-truth poses for the toy problem.
pose0 = sf.Pose2.identity()
# FIX: sf.Pose2 takes a Rot2 (not a raw scalar) as its rotation argument;
# sf.Pose2(0.0, ...) is an API misuse (see the corrected version later in
# this thread).
pose1 = sf.Pose2(sf.Rot2.from_angle(0.0), sf.V2(1.0, 0.0))
pose2 = sf.Pose2(sf.Rot2.from_angle(np.pi/2), sf.V2(1., 1.))

deltaPose01 = sf.Pose2(sf.Rot2.from_angle(np.pi/2), sf.V2(1.0, 0.0))  # from pose0 to pose1
# FIX: same Rot2 constructor issue as pose1 above.
deltaPose12 = sf.Pose2(sf.Rot2.from_angle(0.0), sf.V2(1.0, 0.0))      # from pose1 to pose2
deltaPose20 = pose2.inverse() * pose0                                 # from pose2 to pose0
deltaPose02 = pose0.inverse() * pose2                                 # from pose0 to pose2

# NOTE(review): pose1 (zero rotation) is still inconsistent with deltaPose01
# (which carries a pi/2 rotation), so the zero-error solution has pose1
# rotated — intentionally left as in the original report.
initPoses = [sf.Pose2.identity()] * 3
deltaPoses = [deltaPose01, deltaPose12]

# Sanity checks: the odometry chain and the forward loop closure both
# reproduce pose2 exactly.
assert pose0 * deltaPose01 * deltaPose12 == pose2
assert pose0 * deltaPose02 == pose2

######################
### optimization routine
######################
def optimize(pose0, initPoses, deltaPoses, loopClosureDeltaPose, loopClosure):
    """
    Build the pose-graph (prior + odometry chain + one loop closure) and run
    the SymForce optimizer.

    loopClosure is a pair of Values keys, e.g. ["poses[0]", "poses[2]"],
    naming the two poses connected by loopClosureDeltaPose. Returns the
    optimizer result.
    """
    initial_values = Values(
        poses=initPoses,
        deltaPoses=deltaPoses,
        priorPose=pose0,
        epsilon=sf.numeric_epsilon,
        loopClosureDeltaPose=loopClosureDeltaPose,
    )

    # Anchor the first pose with a prior.
    factors = [
        Factor(
            residual=prior_pose_residual,
            keys=["poses[0]", "priorPose", "epsilon"],
        )
    ]
    # Chain consecutive poses together with the odometry measurements.
    for idx, _ in enumerate(deltaPoses):
        factors.append(
            Factor(
                residual=delta_pose_residual,
                keys=[f"poses[{idx}]", f"poses[{idx + 1}]", f"deltaPoses[{idx}]", "epsilon"],
            )
        )
    # Close the loop between the two requested poses.
    factors.append(
        Factor(
            residual=delta_pose_residual,
            keys=[loopClosure[0], loopClosure[1], "loopClosureDeltaPose", "epsilon"],
        )
    )

    optimizer = Optimizer(
        factors=factors,
        optimized_keys=[f"poses[{idx}]" for idx in range(len(initPoses))],
        debug_stats=True,
    )
    return optimizer.optimize(initial_values)

#####################
### solve optimization problem for forward loop closure: deltaPose02
#####################
# Forward loop closure: factor keyed (poses[0], poses[2]) with deltaPose02.
result = optimize(pose0, initPoses, deltaPoses, deltaPose02, ["poses[0]", "poses[2]"])
print("Optimized poses using loop closure pose0->pose2:", [f"{p.t[0]:.3f}, {p.t[1]:.2f}" for p in result.optimized_values["poses"]])

#####################
### solve optimization problem for backward loop closure: deltaPose20
#####################
# Backward loop closure: same constraint expressed in the opposite direction,
# keyed (poses[2], poses[0]) with deltaPose20. Mathematically this should
# yield the same optimum as the forward version above.
result = optimize(pose0, initPoses, deltaPoses, deltaPose20, ["poses[2]", "poses[0]"])
print("Optimized poses using loop closure pose2->pose0:", [f"{p.t[0]:.3f}, {p.t[1]:.2f}" for p in result.optimized_values["poses"]])

Expected behavior Optimization result should be the same, however we are getting the following result:

Optimized poses using loop closure pose0->pose2: ['0.000, -0.00', '1.000, -0.00', '1.000, 1.00']
Optimized poses using loop closure pose2->pose0: ['0.000, 0.00', '0.000, 0.00', '0.000, 0.00']

Environment (please complete the following information):

mikoff commented 1 year ago

GTSAM produces the same result for both scenarios (which is what is expected). Any ideas why this is not the case for SymForce, and how can I fix it ad hoc?

import gtsam

# Same toy problem expressed in GTSAM for comparison.
# gtsam.Pose2 takes (x, y, theta).
pose0 = gtsam.Pose2.Identity()
pose1 = gtsam.Pose2(1.0, 0.0, 0.0)
pose2 = gtsam.Pose2(1., 1., np.pi/2)

deltaPose01 = gtsam.Pose2(1.0, 0.0, np.pi/2)
deltaPose12 = gtsam.Pose2(1., 0.0, 0.0)
deltaPose20 = pose2.inverse() * pose0
deltaPose02 = pose0.inverse() * pose2

from gtsam.symbol_shorthand import L, X

# Unit (identity) noise on every factor, matching the unweighted SymForce
# residuals above.
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([1., 1., 1.]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([1., 1., 1.]))

graph = gtsam.NonlinearFactorGraph()

X0 = X(0)
X1 = X(1)
X2 = X(2)

# Prior anchoring the first pose at the origin.
graph.add(
    gtsam.PriorFactorPose2(X0, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))

# Odometry chain.
graph.add(
    gtsam.BetweenFactorPose2(X0, X1, deltaPose01,
                             ODOMETRY_NOISE))
graph.add(
    gtsam.BetweenFactorPose2(X1, X2, deltaPose12,
                             ODOMETRY_NOISE))
# Backward loop closure — swap the commented alternative in for the
# forward direction; GTSAM converges to the same answer either way.
graph.add(
    gtsam.BetweenFactorPose2(X2, X0, deltaPose20,         # or X0, X2, deltaPose02 for forward
                             ODOMETRY_NOISE))

# NOTE(review): unlike the SymForce script (which initializes all poses at
# identity), this initializes at pose0/pose1/pose2 directly.
initial_estimate = gtsam.Values()
initial_estimate.insert(X0, pose0)
initial_estimate.insert(X1, pose1)
initial_estimate.insert(X2, pose2)

params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                              params)
result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))
aaron-skydio commented 1 year ago

A couple notes/observations:

1) Not sure if this was intentional or not, just calling it out either way for other readers: pose1 is not consistent with deltaPose01 and deltaPose12 in the examples; for them to be consistent, pose1 should be rotated (or deltaPose01 should not be). There's still a 0-error solution to those examples, it just isn't [pose0, pose1, pose2] — it has pose1 rotated.

2) Generally this is a nonlinear optimization problem, so while yes, you would hope that specifying the between factor in either direction would give identical results, they aren't guaranteed to be.

3) This seems to come down to our formulation of Pose2 having a narrower basin of convergence (being less globally linear?) than GTSAM's, I think. If you initialize SymForce at the solution GTSAM converges to, it's happy to stay there. Notably this is not initializing at pose0, pose1, pose2, because of (1).

All that to say I don't think this is a bug per se, but yeah ideally we'd probably converge here, we'll dig into this some more. It's possible that requires a more expensive formulation of either the poses or the between factors specifically, if so we might want a "fast, less linear" version and a "more globally convergent" version

mikoff commented 1 year ago

@aaron-skydio thanks for an update.

  1. Thanks for catching this - indeed, I messed up with delta poses, but the optimization result is wrong even if I fix all the numbers.
  2. Initially I experienced this problem with a graph of Pose3 objects on real data - the addition of backward loop closures didn't have any effect at all (the trajectory looked as if there were none of them). After I added this constraint in the forward direction, the optimizer worked as expected.

My biggest concern is the following: when we add these backward constraints on real data or in more complex examples, we just see a worse result. It is easy to overlook this and end up with a non-optimal result.

dawood95 commented 10 months ago

Hi, this bug still exists. Any suggestions / ideas on how to avoid in bigger systems? Thanks!

tbretl commented 9 months ago

Hi @aaron-skydio - I think there's a real problem here that needs to be looked at. I created a very simple example showing that this has nothing to do (I don't think) with the "order of factors" or with "loop closure" per se - instead, results seem to depend on the order in which variables are listed in optimized_keys.

# Do all imports
import symforce
symforce.set_epsilon_to_symbol()
import symforce.symbolic as sf
from symforce.values import Values
from symforce.opt.factor import Factor
from symforce.opt.optimizer import Optimizer
import sym

# Define prior residual
def prior_residual(
    a: sf.Scalar,
    a_prior: sf.Scalar,
) -> sf.V1:
    """Residual anchoring `a` to its prior (zero when a == a_prior)."""
    error = a_prior - a
    return sf.V1(error)

# Define delta residual
def delta_residual(
    a: sf.Scalar,
    b: sf.Scalar,
    b_minus_a: sf.Scalar,
) -> sf.V1:
    """Residual comparing the measured difference to the estimated (b - a)."""
    estimated = b - a
    return sf.V1(b_minus_a - estimated)

# Define values
# Initial guesses plus perfect measurements: the global optimum is p0 == 1.0
# and p1 == 2.0 with zero total error.
initial_values = Values(
    p0=1.1,              # initial guess
    p1=1.9,              # initial guess
    p1_minus_p0=1.,      # perfect measurement
    p0_prior=1.,         # perfect prior
    epsilon=sym.epsilon,
)

# Define factors
factors = []
factors.append(Factor(residual=prior_residual, keys=['p0', 'p0_prior']))
factors.append(Factor(residual=delta_residual, keys=['p0', 'p1', 'p1_minus_p0']))

# Create and run optimizer with keys ordered p0, p1
optimizer = Optimizer(factors=factors, optimized_keys=['p0', 'p1'], params=Optimizer.Params(verbose=False))
result = optimizer.optimize(initial_values)
print(f'result.error() = {result.error():6.4f} (status = {result.status})')

# Create and run optimizer with keys ordered p1, p0 — the ONLY difference
# from the run above is the order of optimized_keys; the results should match.
optimizer = Optimizer(factors=factors, optimized_keys=['p1', 'p0'], params=Optimizer.Params(verbose=False))
result = optimizer.optimize(initial_values)
print(f'result.error() = {result.error():6.4f} (status = {result.status})')

In this example, we estimate the value of two numbers, p0 and p1, given a perfect prior on p0 and a perfect measurement of the difference p1 - p0. We do this twice:

The results should be the same, but they are not:

result.error() = 0.0000 (status = optimization_status_t.SUCCESS)
result.error() = 0.0250 (status = optimization_status_t.FAILED)

What is going on?

(I hope I am just misunderstanding - thanks for your attention to this!)

aaron-skydio commented 9 months ago

@tbretl That looks like a separate bug which I suspect is fixed by https://github.com/symforce-org/symforce/pull/375

tbretl commented 9 months ago

@tbretl That looks like a separate bug which I suspect is fixed by #375

@aaron-skydio - thanks for the quick response - I can confirm that #375 fixes this bug (and will look forward to seeing it merged). I assume there is no similar issue with the C++ API?

This isn't a separate bug - it is the source of error for the code posted by @mikoff. This code has a couple of syntax errors - here is a version with these syntax errors fixed (I made no attempt to fix the mistakes in problem definition that you pointed out earlier):

import symforce

symforce.set_log_level("warning")
symforce.set_epsilon_to_number(0.000001)

import symforce.symbolic as sf
from symforce.opt.factor import Factor
from symforce.values import Values
from symforce.opt.optimizer import Optimizer

import numpy as np

######################
### define residuals
######################

def delta_pose_residual(
    poseA: sf.Pose2,
    poseB: sf.Pose2,
    deltaMeasured: sf.Pose2,
    epsilon: sf.Scalar,
) -> sf.V3:
    """
    Error between the measured A->B delta and the delta implied by the two
    pose estimates; zero when poseA.inverse() * poseB == deltaMeasured.
    """
    delta_from_poses = poseA.inverse() * poseB
    coords = delta_from_poses.local_coordinates(deltaMeasured, epsilon=epsilon)
    return sf.V3(coords)

def prior_pose_residual(
    pose: sf.Pose2,
    posePrior: sf.Pose2,
    epsilon: sf.Scalar,
) -> sf.V3:
    """Prior residual: tangent-space distance from `pose` to `posePrior`."""
    coords = pose.local_coordinates(posePrior, epsilon=epsilon)
    return sf.V3(coords)

######################
### generate data
######################
# Ground-truth poses for the toy problem.
pose0 = sf.Pose2.identity()
# ORIGINAL
# pose1 = sf.Pose2(0.0, sf.V2(1.0, 0.0))
# FIXED (BRETL): sf.Pose2 takes a Rot2, not a raw scalar, for its rotation.
pose1 = sf.Pose2(sf.Rot2.from_angle(0.0), sf.V2(1.0, 0.0))
pose2 = sf.Pose2(sf.Rot2.from_angle(np.pi/2), sf.V2(1., 1.))

deltaPose01 = sf.Pose2(sf.Rot2.from_angle(np.pi/2), sf.V2(1.0, 0.0))  # from pose0 to pose1
# ORIGINAL
# deltaPose12 = sf.Pose2(0.0, sf.V2(1.0, 0.0))                          # from pose1 to pose2
# FIXED (BRETL): same Rot2 constructor fix as pose1 above.
deltaPose12 = sf.Pose2(sf.Rot2.from_angle(0.0), sf.V2(1.0, 0.0))      # from pose1 to pose2
deltaPose20 = pose2.inverse() * pose0                                 # from pose2 to pose0
deltaPose02 = pose0.inverse() * pose2                                 # from pose0 to pose2

# All poses start at identity; only the loop-closure direction is varied
# between the two optimize() calls below.
initPoses = [sf.Pose2.identity()] * 3
deltaPoses = [deltaPose01, deltaPose12]

# Sanity checks: odometry chain and forward loop closure both reach pose2.
assert pose0 * deltaPose01 * deltaPose12 == pose2
assert pose0 * deltaPose02 == pose2

######################
### optimization routine
######################
def optimize(pose0, initPoses, deltaPoses, loopClosureDeltaPose, loopClosure):
    """
    Assemble the pose graph (prior + odometry factors + one loop-closure
    factor between the two keys named in `loopClosure`) and run the
    optimizer, returning its result.
    """
    initial_values = Values(
        poses=initPoses,
        deltaPoses=deltaPoses,
        priorPose=pose0,
        epsilon=sf.numeric_epsilon,
        loopClosureDeltaPose=loopClosureDeltaPose,
    )

    # Prior on the first pose anchors the gauge freedom.
    factors = [
        Factor(
            residual=prior_pose_residual,
            keys=["poses[0]", "priorPose", "epsilon"],
        )
    ]
    # One odometry factor per consecutive pair of poses.
    for idx in range(len(deltaPoses)):
        factors.append(
            Factor(
                residual=delta_pose_residual,
                keys=[f"poses[{idx}]", f"poses[{idx + 1}]", f"deltaPoses[{idx}]", "epsilon"],
            )
        )
    # Loop-closure factor between the caller-specified pair of poses.
    factors.append(
        Factor(
            residual=delta_pose_residual,
            keys=[loopClosure[0], loopClosure[1], "loopClosureDeltaPose", "epsilon"],
        )
    )

    optimizer = Optimizer(
        factors=factors,
        optimized_keys=[f"poses[{idx}]" for idx in range(len(initPoses))],
        debug_stats=True,
    )
    return optimizer.optimize(initial_values)

#####################
### solve optimization problem for forward loop closure: deltaPose02
#####################
# Forward loop closure: factor keyed (poses[0], poses[2]) with deltaPose02.
result = optimize(pose0, initPoses, deltaPoses, deltaPose02, ["poses[0]", "poses[2]"])
print("Optimized poses using loop closure pose0->pose2:", [f"{p.t[0]:.3f}, {p.t[1]:.2f}" for p in result.optimized_values["poses"]])

#####################
### solve optimization problem for backward loop closure: deltaPose20
#####################
# Backward loop closure: the same constraint expressed in the opposite
# direction, keyed (poses[2], poses[0]) with deltaPose20.
result = optimize(pose0, initPoses, deltaPoses, deltaPose20, ["poses[2]", "poses[0]"])
print("Optimized poses using loop closure pose2->pose0:", [f"{p.t[0]:.3f}, {p.t[1]:.2f}" for p in result.optimized_values["poses"]])

Here are the results without the bug-fix of #375:

Optimized poses using loop closure pose0->pose2: ['0.000, -0.00', '1.000, -0.00', '1.000, 1.00']
Optimized poses using loop closure pose2->pose0: ['0.000, 0.00', '0.000, 0.00', '0.000, 0.00']

Here are the results with the bug-fix of #375:

Optimized poses using loop closure pose0->pose2: ['0.000, -0.00', '1.000, -0.00', '1.000, 1.00']
Optimized poses using loop closure pose2->pose0: ['0.000, -0.00', '1.000, -0.00', '1.000, 1.00']

So, I think this issue is closed as soon as #375 is merged.

aaron-skydio commented 9 months ago

Ah, you're right it does! We'll merge that PR, I also added a bit to not degrade the number of cache hits either. Yes this only applies to the Python layer, the C++ optimizer doesn't have an analogous cache