I am trying to rewrite a MATLAB fmincon optimization function in Julia.
Here is the MATLAB code:
function [x,fval] = example3()
    x0 = [0; 0; 0; 0; 0; 0; 0; 0];
    A = [];
    b = [];
    Ae = [1000 1000 1000 1000 -1000 -1000 -1000 -1000];
    be = [100];
    lb = [0; 0; 0; 0; 0; 0; 0; 0];
    ub = [1; 1; 1; 1; 1; 1; 1; 1];
    noncon = [];  % no nonlinear constraints
    options = optimset('fmincon');
    options.Algorithm = 'interior-point';
    % pass noncon (empty) directly; @noncon would look for a function that does not exist
    [x,fval] = fmincon(@objfcn,x0,A,b,Ae,be,lb,ub,noncon,options);
end
function f = objfcn(x)
    % user inputs
    Cr = [ 0.0064  0.00408 0.00192 0;
           0.00408 0.0289  0.0204  0.0119;
           0.00192 0.0204  0.0576  0.0336;
           0       0.0119  0.0336  0.1225 ];
    w0 = [0.3; 0.3; 0.2; 0.1];
    Er = [0.05; 0.1; 0.12; 0.18];
    % calculate objective function
    w = w0 + x(1:4) - x(5:8);
    Er_p = w'*Er;           % portfolio expected return
    Sr_p = sqrt(w'*Cr*w);   % portfolio risk (standard deviation)
    % f = negative return/risk ratio; minimizing f maximizes the ratio
    f = -Er_p/Sr_p;
end
and here is my Julia code:
using JuMP
using Ipopt
m = Model(solver=IpoptSolver())
# INPUT DATA
w0 = [ 0.3; 0.3; 0.2; 0.1 ]
Er = [0.05; 0.1; 0.12; 0.18]
Cr = [ 0.0064  0.00408 0.00192 0;
       0.00408 0.0289  0.0204  0.0119;
       0.00192 0.0204  0.0576  0.0336;
       0       0.0119  0.0336  0.1225 ]
# VARIABLES
@defVar(m, 0 <= x[i=1:8] <= 1, start = 0.0)
@defNLExpr(w, w0 + x[1:4] - x[5:8])
@defNLExpr(Er_p, w'*Er)
@defNLExpr(Sr_p, sqrt(w'*Cr*w))  # sqrt, to match the MATLAB Sr_p
@defNLExpr(f, -Er_p/Sr_p)        # negated, as in the MATLAB objective
# OBJECTIVE
@setNLObjective(m, Min, f)
# CONSTRAINTS
@addConstraint(m, 1000*x[1] + 1000*x[2] + 1000*x[3] + 1000*x[4] -
1000*x[5] - 1000*x[6] - 1000*x[7] - 1000*x[8] == 100)
# SOLVE
status = solve(m)
# DISPLAY RESULTS
println("x = ", round(getValue(x),4))
println("f = ", round(getObjectiveValue(m),4))
The Julia optimization works when I explicitly define the objective function inside @setNLObjective. However, that is unsuitable here because the user's inputs can change, producing a different objective function each time, as you can see from how the objective function is built up.
The issue seems to be a limitation of JuMP in how the objective function can be entered as the @setNLObjective argument:
All expressions must be simple scalar operations. You cannot use dot, matrix-vector products, vector slices, etc. Translate vector operations into explicit sum{} operations.
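For reference, "explicit sum{} operations" means expanding every vector product index by index. A sketch of the same objective in that form, using the same old JuMP syntax as above and the w0, Er, and Cr arrays defined earlier, would be:
@setNLObjective(m, Min,
    -sum{(w0[i] + x[i] - x[i+4])*Er[i], i=1:4} /
     sqrt(sum{(w0[i] + x[i] - x[i+4])*Cr[i,j]*(w0[j] + x[j] - x[j+4]), i=1:4, j=1:4}))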
Is there a way around this? Or are there any other packages in Julia that could solve this, bearing in mind I will not have the Jacobian or Hessian?
Many thanks.
Here is a working example of the MATLAB code using Julia and the NLopt optimization package.
using NLopt
function objective_function(x::Vector{Float64}, grad::Vector{Float64})
    # grad is unused: COBYLA is a derivative-free algorithm
    w0 = [0.3; 0.3; 0.2; 0.1]
    Er = [0.05; 0.1; 0.12; 0.18]
    Cr = [ 0.0064  0.00408 0.00192 0;
           0.00408 0.0289  0.0204  0.0119;
           0.00192 0.0204  0.0576  0.0336;
           0       0.0119  0.0336  0.1225 ]
    w = w0 + x[1:4] - x[5:8]   # adjusted portfolio weights
    Er_p = w' * Er             # portfolio expected return
    Sr_p = sqrt(w' * Cr * w)   # portfolio risk (standard deviation)
    f = -Er_p/Sr_p
    return f[1]                # extract the scalar (w'*Er can be a 1-element array on older Julia)
end
function constraint_function(x::Vector, grad::Vector)
    # fmincon's equality Ae*x = be, rewritten as Ae*x - be == 0 for NLopt
    constraintValue = 1000*x[1] + 1000*x[2] + 1000*x[3] + 1000*x[4] -
                      1000*x[5] - 1000*x[6] - 1000*x[7] - 1000*x[8] - 100
    return constraintValue
end
opt1 = Opt(:LN_COBYLA, 8)   # derivative-free local optimizer over 8 variables
lower_bounds!(opt1, zeros(8))
upper_bounds!(opt1, ones(8))
# optional stopping tolerances:
#ftol_rel!(opt1, 0.5)
#ftol_abs!(opt1, 0.5)
min_objective!(opt1, objective_function)
equality_constraint!(opt1, constraint_function, 1e-8)   # small tolerance helps COBYLA satisfy the equality
(fObjOpt, xOpt, flag) = optimize(opt1, zeros(8))
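The results can then be printed much as in the JuMP version; the broadcast rounding syntax below assumes Julia 1.x:
println("x = ", round.(xOpt; digits=4))
println("f = ", round(fObjOpt; digits=4))
println("status = ", flag)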