
SciPy optimizer ignores one of the constraints

I am trying to solve an optimization problem where I need to create a portfolio with minimum tracking error relative to a benchmark portfolio, subject to some constraints:

import scipy.optimize as opt
import numpy as np

def random_portfolio(n):
    a = np.random.random(n)
    a /= a.sum()
    return a

portfolio_weights = [1 for i in range(20)]
portfolio_weights = [i/len(portfolio_weights) for i in portfolio_weights]

def tracking_error_function(W, port_weights):
    weight_diff = list(np.array(port_weights)-np.array(W))
    weight_diff = sum([i**2 for i in weight_diff])
    return weight_diff

def total_max_weight_constraint(weights):
    max_weights_share = sum([i for i in weights if i > 0.045])
    max_ineq = 0.36 - max_weights_share
    return max_ineq

def gen_initial_weights(n):
    max_weights = [0.089 for i in range(4)]
    rest_of_n = n - 4
    rest_of_weight = 1 - sum(max_weights)
    other_weights = [rest_of_weight/rest_of_n for i in range(rest_of_n)]
    all_weights = max_weights + other_weights
    return all_weights

initial_weights = np.asarray(gen_initial_weights(len(portfolio_weights)))

tr_err = tracking_error_function(initial_weights, portfolio_weights)  
b_ = [(0.0, 0.09) for i in range(len(initial_weights))]
c_ = ({'type': 'eq', 'fun': lambda W: sum(W) - 1},
  {'type': 'ineq', 'fun': total_max_weight_constraint})

optimized = opt.minimize(tracking_error_function, initial_weights, args=(portfolio_weights,),
                         method='SLSQP', constraints=c_, bounds=b_, options={'maxiter': 100000})

So my initial guess satisfies the constraints and the benchmark is equally weighted. When I run it, the result is exactly the equally-weighted portfolio, although it clearly violates the second constraint. Moreover, the status is success. Any ideas what I am doing wrong?
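
A quick check along these lines (using the objects defined above) shows the violation: each of the 20 returned weights is 0.05, which is above the 0.045 cutoff, so the capped share comes out near 1.0 instead of at most 0.36.

result = optimized.x
print(sum(w for w in result if w > 0.045))   # ~1.0, well above the 0.36 cap
print(total_max_weight_constraint(result))   # ~-0.64, i.e. the constraint is violated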

Update: This is a solution that seems to work in my case. Instead of relying on the inequality constraint alone, the excess weight above the 36% cap is also added to the objective as a penalty, computed with a smooth (continuous) approximation of the step function so the objective stays differentiable:

import scipy.optimize as opt
import numpy as np
import random
import math
import matplotlib.pyplot as plt


def random_portfolio(n):
    #random.seed(123)
    a = np.random.random(n)
    a /= a.sum()
    return a

def continous_step_function(x, cutoff):
    # Smooth approximation of a step at `cutoff`: returns ~0 below the cutoff
    # and ~x above it, so the penalty stays differentiable.
    return x / (1 + safe_exp(-(x - cutoff) * 200000))

def safe_exp(x):
    # exp() that returns infinity instead of raising OverflowError.
    try:
        ans = math.exp(x)
    except OverflowError:
        ans = float('inf')
    return ans

def gen_initial_weights(n):
    max_weights = [0.0899999 for i in range(4)]
    rest_of_n = n - 4
    rest_of_weight = 1 - sum(max_weights)
    other_weights = [rest_of_weight/rest_of_n for i in range(rest_of_n)]
    all_weights = max_weights + other_weights
    return all_weights

def tracking_error_function(W, port_weights):
    weight_diff = port_weights - W
    weight_diff = np.sum(weight_diff ** 2)

    excessive_weight = max(0,(sum([continous_step_function(i,0.045) for i in W]) - 0.36))

    return weight_diff + excessive_weight

def total_max_weight_constraint(weights):
    max_weights_share = sum([continous_step_function(i,0.045) for i in weights])
    max_ineq = 0.36 - max_weights_share
    return max_ineq

def run():
    portfolio_weights = sorted(random_portfolio(20))

    initial_weights = np.asarray(gen_initial_weights(len(portfolio_weights)))
    initial_weights = sorted(initial_weights)

    b_ = [(0.0, 0.09) for i in range(len(initial_weights))]
    c_ = ({'type': 'eq', 'fun': lambda W: sum(W) - 1},
          {'type': 'ineq', 'fun': total_max_weight_constraint}
          )

    optimized = opt.minimize(tracking_error_function, initial_weights, args=(portfolio_weights,), constraints=c_,
                             bounds=b_, options={'eps': 1e-8, 'ftol': 1e-8, 'iprint': 0, 'disp': 0, 'maxiter': 10000})

    result = optimized.x

    if tracking_error_function(result, portfolio_weights) > 0.05:
        print('Excessive tracking error: ')
        print('Residual error: {}'.format(tracking_error_function(result, portfolio_weights)))
        print('Target: {} {}'.format(sum(portfolio_weights), portfolio_weights))
        print('Result: {} {}'.format(sum(result), result))

    if sum([i for i in result if i > 0.045]) > 0.36:
        print('Excessive weight > .045: ')
        print('Percentage > .045: {}'.format(sum([x for x in result if x > 0.045])))
        print('Target: {} {}'.format(sum(portfolio_weights), portfolio_weights))
        print('Result: {} {}'.format(sum(result), result))

    if not all(b >= (a - 0.001) for a, b in zip(result, result[1:])):
        print('Result not continuously rising: ')
        print('Target: {} {}'.format(sum(portfolio_weights), portfolio_weights))
        print('Result: {} {}'.format(sum(result), result))

def plot_output(result, target):
    plt.bar(range(len(result)), result,  color='b', width = 0.3)
    plt.plot(range(len(target)), target, color='r')
    plt.show()
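
A minimal driver (hypothetical, not part of the original script) that exercises these checks on several random benchmarks might look like this:

if __name__ == '__main__':
    # Repeat the optimization on random benchmarks; run() prints any violations it detects.
    for _ in range(10):
        run()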

1 Answer

It appears that the minimization simply ignores the inequality constraint in this particular case. I do not know why this happens; when testing a simpler example, both equality and inequality constraints worked correctly together.

Equality constraints often cause problems in numerical optimization because it may be impossible for floating-point numbers to satisfy them exactly. Getting rid of the equality constraint seems to work as a workaround for the problem at hand.
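
As a small illustration of why exact equality is hard to hit in floating point:

# Ten weights of 0.1 should sum to exactly 1, but floating-point addition
# leaves a tiny residual, so sum(W) - 1 is usually a tiny nonzero number rather than exactly 0.
weights = [0.1] * 10
print(sum(weights) - 1)   # roughly -1.1e-16 rather than 0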

The constraint {'type': 'eq', 'fun': lambda W: sum(W) - 1} forces all N weights to sum exactly to 1. There is another way to enforce this: we can optimize only N-1 weights and constrain their sum to be at most 1. The remaining weight is then implicitly given by 1 - sum(other_weights). This requires some changes to the code:

def expand_weights(weights):
    """This function takes N-1 weights and adds the implicit Nth weight 
       so that together their sum is 1."""
    return np.append(weights, 1 - np.sum(weights))

def tracking_error_function(W, port_weights):
    weight_diff = port_weights - expand_weights(W)
    weight_diff = np.sum(weight_diff ** 2)
    return weight_diff

def total_max_weight_constraint(weights):
    weights = expand_weights(weights)
    max_weights_share = sum([i for i in weights if i > 0.045])
    max_ineq = 0.36 - max_weights_share
    return max_ineq

We simply take the original initial weights and remove the last one. The bounds list has to match the N-1 remaining weights as well (SLSQP requires len(bounds) == len(x0)):

initial_weights = np.asarray(gen_initial_weights(len(portfolio_weights)))
initial_weights = initial_weights[:-1]
b_ = [(0.0, 0.09) for i in range(len(initial_weights))]  # N-1 bounds for the N-1 free weights

Finally, the constraints become:

c_ = ({'type': 'ineq', 'fun': lambda W: 1 - sum(W)},
      {'type': 'ineq', 'fun': total_max_weight_constraint})

Run the optimization and see if the constraints are satisfied:

optimized = opt.minimize(tracking_error_function, initial_weights,
                         args=(portfolio_weights,), method='SLSQP',
                         constraints=c_, bounds=b_,
                         options={'maxiter': 100000, 'disp': 5})

assert np.allclose(1, np.sum(expand_weights(optimized.x)))  # check equality constraint
assert total_max_weight_constraint(optimized.x) >= 0  # check second constraint (ineq means fun(x) >= 0)
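
For a final sanity check (a sketch using the names defined above), the full weight vector can be reconstructed and inspected:

final_weights = expand_weights(optimized.x)
print('sum of weights:', final_weights.sum())
print('share above 0.045:', sum(w for w in final_weights if w > 0.045))  # should be <= 0.36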