duclos-cavalcanti / TreeBuilder

Master Thesis on High Performance Multicasting on the Cloud
BSD 3-Clause "New" or "Revised" License

LemonDrop Benchmarking #4

Open duclos-cavalcanti opened 6 months ago

duclos-cavalcanti commented 6 months ago
```Python
import time
import cvxpy as cp
import numpy as np

def solve_miqp_min(rtt_mat, k=15, thresh=.1):

    start = time.time()

    # Shift the spectrum so the matrix is PSD, as required by cp.quad_form.
    # With boolean s and sum(s) == k, the shift changes the objective only by
    # a constant (lambda_min * k), so the argmin is unchanged.
    lambda_min = np.linalg.eigvalsh(rtt_mat).min()  # eigvalsh: symmetric input, real eigenvalues
    psd_rtt_mat = rtt_mat - lambda_min * np.eye(len(rtt_mat))

    s_vec = cp.Variable(len(rtt_mat), boolean=True)

    # Minimize the total pairwise RTT among the selected nodes: s^T M s.
    objective = cp.Minimize(cp.quad_form(s_vec, psd_rtt_mat))
    constraints = [cp.sum(s_vec) == k]  # select exactly k of the n nodes

    prob = cp.Problem(objective, constraints)
    prob.solve(solver=cp.GUROBI, TimeLimit=10)  # Gurobi with a 10s time limit
    print(prob.status)                          # human-readable solver status
    if prob.status in ["infeasible", "unbounded"]:
        print("NOT SOLVED OPTIMALLY")

    sol = s_vec.value if s_vec.value is not None else []
    qp_idx_min = [i for i, v in enumerate(sol) if v > thresh]
    end = time.time()

    elapsed_time = end - start

    return qp_idx_min, elapsed_time, prob.status
```
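
For context, a minimal usage sketch (the random symmetric RTT matrix below is hypothetical, and solving still requires Gurobi via cvxpy):

```Python
import numpy as np

# hypothetical symmetric 20x20 RTT matrix with zero diagonal
rng = np.random.default_rng(1)
A = rng.uniform(1, 50, (20, 20))
rtt = (A + A.T) / 2
np.fill_diagonal(rtt, 0)

idx, elapsed, status = solve_miqp_min(rtt, k=5)
print(f"selected nodes: {idx} ({status}, {elapsed:.2f}s)")
```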
duclos-cavalcanti commented 3 months ago
function FAQ(LOAD, OWD, epsilon, max_i)
    P(0) = (1/n) * 1 * 1^T  # doubly stochastic barycenter
    i = 0  # Iteration number
    s = 0  # Stopping condition

    while s == 0 do
        # Gradient wrt current solution
        grad(P(i)) = - (LOAD * P(i) * OWD^T) - (LOAD^T * P(i) * OWD)

        # Direction which minimizes 1st order approx of f(P)
        Q(i) = argmin(P element of D) of trace( grad(P(i))^T * P )

        # Best step size in chosen direction
        alpha = argmin(alpha element of [0, 1]) of f( P(i) + alpha * Q(i) )

        # Update our current solution
        P(i+1) = P(i) + alpha * Q(i)

        # Stopping conditions
        if ||P(i+1) - P(i)||_F < epsilon then s = 1

        if i > max_i then s = 1

        i = i + 1

    # Project the final iterate onto the set of permutation matrices (via LAP)
    P(final) = argmin(P element of Perms) of || P(i) - P ||_F
    return P(final)
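
The direction subproblem is a linear program over the doubly stochastic matrices D; by Birkhoff-von Neumann its optimum is attained at a permutation matrix, so it reduces to a linear assignment problem (as the final implementation below exploits). A minimal sanity check of that equivalence, assuming scipy:

```Python
import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
grad = rng.normal(size=(5, 5))  # stand-in gradient matrix

# linear_sum_assignment picks one entry per row/column with minimal total
# cost, i.e. the permutation Q minimizing trace(grad^T Q)
row, col = optimize.linear_sum_assignment(grad)
Q = np.zeros_like(grad)
Q[row, col] = 1
print(np.trace(grad.T @ Q))  # also the LP optimum over all of D
```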
duclos-cavalcanti commented 3 months ago
```Python 
    def pFAQ(self, OWD:np.ndarray, LOAD:np.ndarray, epsilon=1e-6, max_i=100) -> np.ndarray:
        """
        1. Initialize P (doubly stochastic matrix).
        2. While a stopping condition isn't met:
            1. Calculate the gradient.
            2. Find the direction (Q) that minimizes the first-order approximation of the objective function.
            3. Determine the best step size (alpha) in the chosen direction.
            4. Update the current solution (P).
        3. Project the final solution (P).
        4. Return the final solution (P_FINAL).
        """
        # P(0) = 11T, a doubly stochastic matrix, i: iteration, s: stopping condition
        P = np.ones((self.N, self.N)) / self.N
        i = 0
        s = 0

        # Λ = LOAD_MATRIX: LOAD
        # Δ = OWD_MATRIX:  OWD
        # 
        # GOAL:
        # Minimize => trace(Λ * P * Δ_T * P_T ) (3.1)

        self.L.debug(message=f"OWD[{OWD.shape}]: \n{self.to_string(OWD)}")
        self.L.debug(message=f"LOAD[{LOAD.shape}]:\n{self.to_string(LOAD)}")

        while s == 0:
            self.L.debug(message=f"I: {i}")
            self.L.debug(message=f"P[{P.shape}]:\n{self.to_string(P)}")

            # GRADIENT WITH RESPECT TO CURRENT SOLUTION
            # ∇f(P(i)) = - Λ P(i) Δ_T - Λ_T P(i) Δ
            grad = - ( (LOAD @ P @ OWD.T) + (LOAD.T @ P @ OWD) )
            self.L.debug(message=f"Gradient[{grad.shape}]:\n{self.to_string(grad)}")

            # DIRECTION WHICH MINIMIZES 1ST ORDER APPROX OF f(P)
            # Q(i) = argmin_{Q ∈ D} trace(∇f(P(i))_T Q)
            # NOTE: grad above follows the FAQ paper's negated sign convention,
            # so the descent direction minimizes trace((-grad)_T Q)
            Q = cp.Variable(grad.shape)
            objective = cp.Minimize(cp.trace(-grad.T @ Q))
            constraints = [
                Q >= 0,                  # All entries of Q should be non-negative
                cp.sum(Q, axis=0) == 1,  # Each column should sum to 1
                cp.sum(Q, axis=1) == 1   # Each row should sum to 1
            ]

            problem = cp.Problem(objective, constraints)
            problem.solve(solver=cp.GUROBI)

            Q = Q.value
            self.L.debug(message=f"Q[{Q.shape}]:\n{self.to_string(Q)}")

            # BEST STEP SIZE IN CHOSEN DIRECTION
            # α(i) = min_{α ∈ [0,1]} f(P(i) + α * Q(i))
            # NOTE: this product of affine terms is not DCP as written;
            # see the closed-form step size below the code block
            alpha     = cp.Variable()
            objective = cp.Minimize(cp.trace((LOAD @ (P + alpha * Q) @ OWD.T) @ (P + alpha * Q).T))
            constraints = [
                    alpha >= 0, 
                    alpha <= 1,
            ]

            problem     = cp.Problem(objective, constraints)
            problem.solve(solver=cp.GUROBI)

            alpha     = alpha.value
            self.L.debug(message=f"ALPHA: {alpha}")

            # UPDATE OUR CURRENT SOLUTION
            # P(i+1) = P(i) + alpha * Q(i)
            P_NEXT  = P + alpha * Q

            # STOPPING CONDITIONS
            if np.linalg.norm(P_NEXT - P, ord='fro') < epsilon or i >= max_i:
                s = 1

            P = P_NEXT
            i += 1

        # PROJECTION OF P ONTO THE SET OF PERMUTATION MATRICES
        # P(FINAL) = argmin_{P ∈ 𝒫} || P(i) - P ||_F
        # NOTE: greedy row-argmax can map two rows to the same column (a
        # non-permutation); the scipy version below solves a LAP instead
        P_FINAL = np.zeros_like(P)
        for r in range(len(P)):
            P_FINAL[r, np.argmax(P[r])] = 1

        return P_FINAL
```
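
Side note: the line-search objective above multiplies expressions affine in `alpha`, which cvxpy's DCP check rejects. Since f(P + αQ) is just a scalar quadratic in α, the step size also has a closed form; a minimal sketch (the helper name `step_size` is mine) that could replace the per-iteration solver call:

```Python
import numpy as np

def step_size(P, Q, LOAD, OWD):
    """Minimize f(P + alpha*Q) over alpha in [0, 1], where
    f(X) = trace(LOAD @ X @ OWD.T @ X.T) expands to a*alpha^2 + b*alpha + c."""
    a = np.trace(LOAD @ Q @ OWD.T @ Q.T)
    b = np.trace(LOAD @ Q @ OWD.T @ P.T) + np.trace(LOAD @ P @ OWD.T @ Q.T)
    if a > 0:  # convex parabola: clip the vertex into [0, 1]
        return float(np.clip(-b / (2 * a), 0.0, 1.0))
    # flat or concave case: the minimum sits at an endpoint
    f = lambda t: np.trace(LOAD @ (P + t * Q) @ OWD.T @ (P + t * Q).T)
    return 0.0 if f(0.0) <= f(1.0) else 1.0
```

With nonnegative load and delay matrices the α² coefficient is nonnegative, so the clipped vertex is the minimizer in practice.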
duclos-cavalcanti commented 3 months ago

Final form of FAQ using scipy:

    # assumes module-level imports: import numpy as np; from scipy import optimize
    def FAQ(self, OWD:np.ndarray, LOAD:np.ndarray, epsilon=1e-6, max_i=100) -> np.ndarray:
        """
        1. Initialize P (doubly stochastic matrix).
        2. While a stopping condition isn't met:
            1. Calculate the gradient.
            2. Find the direction (Q) that minimizes the first-order approximation of the objective function.
            3. Determine the best step size (alpha) in the chosen direction.
            4. Update the current solution (P).
        3. Project the final solution (P).
        4. Return the final solution (P_FINAL).
        """
        # P(0) = 11T, a doubly stochastic matrix, i: iteration, s: stopping condition
        P = np.ones((self.N, self.N)) / self.N
        i = 0
        s = 0

        # Λ = LOAD_MATRIX: LOAD
        # Δ = OWD_MATRIX:  OWD
        # 
        # GOAL:
        # Minimize => trace(Λ * P * Δ_T * P_T ) (3.1)

        while s == 0:
            # GRADIENT WITH RESPECT TO CURRENT SOLUTION
            # ∇f(P(i)) = - Λ P(i) Δ_T - Λ_T P(i) Δ
            grad = - (LOAD @ P @ OWD.T) - (LOAD.T @ P @ OWD)

            # DIRECTION WHICH MINIMIZES 1ST ORDER APPROX OF f(P)
            # Q(i) = argmin_{P ∈ D} trace(∇f(P(i))_T P)
            # Hungarian Algorithm / LAP: grad is negated, so the LAP over
            # -grad yields the descent direction for the trace objective
            Q = np.zeros((self.N, self.N))
            row, col = optimize.linear_sum_assignment(-grad)
            Q[row, col] = 1

            # BEST STEP SIZE IN CHOSEN DIRECTION
            # α(i) = min_{α ∈ [0,1]} f(P(i) + α * Q(i))
            result = optimize.minimize_scalar(
                        lambda alpha: np.trace(LOAD @ (P + alpha * Q) @ OWD.T @ (P + alpha * Q).T), 
                        bounds=(0, 1), 
                        method='bounded'
            )
            alpha = result.x

            # UPDATE OUR CURRENT SOLUTION
            # P(i+1) = P(i) + alpha * Q(i)
            P_NEXT  = P + (alpha * Q)

            # STOPPING CONDITIONS
            if np.linalg.norm(P_NEXT - P, ord='fro') < epsilon or i >= max_i:
                s = 1

            P = P_NEXT
            i += 1

        # PROJECTION OF P ONTO THE SET OF PERMUTATION MATRICES
        # P(FINAL) = argmin_{P ∈ 𝒫} || P(i) - P ||_F
        # Hungarian Algorithm / LAP: equivalently, maximize trace(P_T P_FINAL)
        P_FINAL = np.zeros_like(P)
        row, col = optimize.linear_sum_assignment(-P) 
        P_FINAL[row, col] = 1
        return P_FINAL
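
For reference, a minimal usage sketch on a random instance; the `solver` object and both matrices are hypothetical stand-ins for an instance of the class above (with `solver.N == N`):

```Python
import numpy as np

rng = np.random.default_rng(0)
N = 6
OWD  = rng.uniform(1, 10, (N, N))   # hypothetical one-way-delay matrix
LOAD = rng.uniform(0, 1, (N, N))    # hypothetical load matrix

P = solver.FAQ(OWD, LOAD)                # permutation matrix with one-hot rows
assignment = np.argmax(P, axis=1)        # node i is assigned to position assignment[i]
cost = np.trace(LOAD @ P @ OWD.T @ P.T)  # objective (3.1) at the returned permutation
print(assignment, cost)
```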

Possible Issues:

nrasadi commented 1 month ago

Is it safe to close this issue? If yes, please provide a brief summary, change the state to done, and close the issue.