I have written Python code that solves a group lasso penalized linear model. For those of you not used to working with these models, the basic idea is that you provide a dataset (x) and a response variable (y), as well as the value of a parameter (lambda1); varying the value of this parameter changes the solution of the model. So I decided to use the multiprocessing library to solve several models (associated with different parameter values) in parallel. I created a Python file called "model.py" containing the following functions:
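For reference, the objective that the code below builds is the standard group lasso criterion (written here in LaTeX; the intercept group is left unpenalized, which matches the penalty loop starting at the second group):

\min_{\beta} \; \frac{1}{n}\,\lVert y - X\beta \rVert_2^2 \;+\; \lambda \sum_{g} \sqrt{p_g}\,\lVert \beta_g \rVert_2

where p_g is the number of predictors in group g, so the sqrt(p_g) weights correspond to the sqrt(group_sizes[i]) factors in the code.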
# -*- coding: utf-8 -*-
from __future__ import division
import functools
import multiprocessing as mp
import numpy as np
from cvxpy import *

def lm_gl_preprocessing(x, y, index, lambda1=None):
    # Build the group lasso problem once; lambda is a cvxpy Parameter, so the
    # problem can be re-solved for different penalty values without rebuilding
    lambda_vector = [lambda1]
    m = x.shape[1]
    n = x.shape[0]
    lambda_param = Parameter(sign="positive")
    m = m + 1
    # Prepend an intercept column with its own group index 0
    index = np.append(0, index)
    x = np.c_[np.ones(n), x]
    group_sizes = []
    beta_var = []
    unique_index = np.unique(index)
    # One cvxpy Variable per group of predictors
    for idx in unique_index:
        group_sizes.append(len(np.where(index == idx)[0]))
        beta_var.append(Variable(len(np.where(index == idx)[0])))
    num_groups = len(group_sizes)
    group_lasso_penalization = 0
    model_prediction = x[:, np.where(index == unique_index[0])[0]] * beta_var[0]
    # Start at 1 so the intercept group is left unpenalized
    for i in range(1, num_groups):
        model_prediction += x[:, np.where(index == unique_index[i])[0]] * beta_var[i]
        group_lasso_penalization += sqrt(group_sizes[i]) * norm(beta_var[i], 2)
    lm_penalization = (1.0/n) * sum_squares(y - model_prediction)
    objective = Minimize(lm_penalization + (lambda_param * group_lasso_penalization))
    problem = Problem(objective)
    response = {'problem': problem, 'beta_var': beta_var, 'lambda_param': lambda_param, 'lambda_vector': lambda_vector}
    return response

def solver(problem, beta_var, lambda_param, lambda_vector):
    # Sequential solver: update the Parameter value and re-solve for each lambda
    beta_sol_list = []
    for i in range(len(lambda_vector)):
        lambda_param.value = lambda_vector[i]
        problem.solve(solver=ECOS)
        beta_sol = np.asarray(np.row_stack([b.value for b in beta_var])).flatten()
        beta_sol_list.append(beta_sol)
    return beta_sol_list

def parallel_solver(problem, beta_var, lambda_param, lambda_vector):
    # Divide parameter vector into chunks to be executed in parallel
    num_chunks = mp.cpu_count()
    chunks = np.array_split(lambda_vector, num_chunks)
    # Solve problem in parallel, one chunk of lambda values per worker
    pool = mp.Pool(num_chunks)
    global_results = pool.map(functools.partial(solver, problem, beta_var, lambda_param), chunks)
    pool.close()
    pool.join()
    return global_results
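A side note that explains the shape of the parallel output further down: with a single lambda value and four CPUs, np.array_split hands one worker the only value and gives the remaining workers empty chunks, for which solver returns an empty list. A minimal illustration:

import numpy as np

# Splitting one lambda value across 4 workers: only the first chunk is non-empty
print(np.array_split([1e-3], 4))
# [array([0.001]), array([], dtype=float64), array([], dtype=float64), array([], dtype=float64)]

This is why the parallel results below look like [[array([...])], [], [], []].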
If, in the Python console, I start by running the parallel solver, it gives a solution. This solution is different from the one provided by the sequential solver. If I restart the Python console, run the sequential solver first, and then run the parallel solver, the parallel solver gives the same solution as the sequential solver. I will show:
from __future__ import division
from sklearn.datasets import load_boston
import numpy as np
import model as t
boston = load_boston()
x = boston.data
y = boston.target
index = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5])
lambda1 = 1e-3
r1 = t.lm_gl_preprocessing(x=x, y=y, index=index, lambda1=lambda1)
s_parallel_1 = t.parallel_solver(problem=r1['problem'], beta_var=r1['beta_var'], lambda_param=r1['lambda_param'], lambda_vector=r1['lambda_vector'])
print(s_parallel_1)
[[array([ 4.61648376e+01, -1.22394832e-04, 0.00000000e+00,
0.00000000e+00, 1.37065733e-04, 1.51910696e-03,
0.00000000e+00, 1.51910696e-03, 0.00000000e+00,
7.00079603e-03, 1.52776114e-03, -8.67357376e-01,
7.16429750e-03, -8.67357376e-01])], [], [], []]
s_1 = t.solver(problem=r1['problem'], beta_var=r1['beta_var'], lambda_param=r1['lambda_param'], lambda_vector=r1['lambda_vector'])
print(s_1)
[array([ 3.62813738e+01, -1.06995338e-01, 4.64210526e-02,
1.97112192e-02, 2.68475527e+00, -1.75142155e+01,
3.80741843e+00, 5.14842823e-04, -1.47105323e+00,
3.04949407e-01, -1.23508259e-02, -9.50143293e-01,
9.40708993e-03, -5.25758097e-01])]
#####################################################
r1 = t.lm_gl_preprocessing(x=x, y=y, index=index, lambda1=lambda1)
s_1 = t.solver(problem=r1['problem'], beta_var=r1['beta_var'], lambda_param=r1['lambda_param'], lambda_vector=r1['lambda_vector'])
print(s_1)
[array([ 3.62813738e+01, -1.06995338e-01, 4.64210526e-02,
1.97112192e-02, 2.68475527e+00, -1.75142155e+01,
3.80741843e+00, 5.14842823e-04, -1.47105323e+00,
3.04949407e-01, -1.23508259e-02, -9.50143293e-01,
9.40708993e-03, -5.25758097e-01])]
s_parallel_1 = t.parallel_solver(problem=r1['problem'], beta_var=r1['beta_var'], lambda_param=r1['lambda_param'], lambda_vector=r1['lambda_vector'])
print(s_parallel_1)
[[array([ 3.62813738e+01, -1.06995338e-01, 4.64210526e-02,
1.97112192e-02, 2.68475527e+00, -1.75142155e+01,
3.80741843e+00, 5.14842823e-04, -1.47105323e+00,
3.04949407e-01, -1.23508259e-02, -9.50143293e-01,
9.40708993e-03, -5.25758097e-01])], [], [], []]
PS: I know that in this example I am using parallel programming just to solve one model with one possible parameter value, but this is just a small example designed to show the difference between the solutions provided by sequential and parallel execution here. I would appreciate any hint, since I am completely lost here.
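(For completeness, the intended real use would sweep many parameter values at once. A hypothetical call could look like the sketch below, where the np.logspace grid is an illustrative choice and not part of the code above:)

import numpy as np
import model as t

# Hypothetical sweep over 20 penalty values from 1e-4 to 10 (illustrative grid)
lambda_grid = np.logspace(-4, 1, 20)
r = t.lm_gl_preprocessing(x=x, y=y, index=index, lambda1=lambda_grid[0])
# parallel_solver accepts the whole grid via lambda_vector
solutions = t.parallel_solver(problem=r['problem'], beta_var=r['beta_var'],
                              lambda_param=r['lambda_param'], lambda_vector=lambda_grid)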
If I execute your code, I get the same result in all cases. This is the code that I am running (I merged the two files):
from __future__ import division
import functools
import multiprocessing as mp
import numpy as np
from cvxpy import *
from sklearn.datasets import load_boston

def lm_gl_preprocessing(x, y, index, lambda1=None):
    lambda_vector = [lambda1]
    m = x.shape[1]
    n = x.shape[0]
    lambda_param = Parameter(sign="positive")
    m = m + 1
    index = np.append(0, index)
    x = np.c_[np.ones(n), x]
    group_sizes = []
    beta_var = []
    unique_index = np.unique(index)
    for idx in unique_index:
        group_sizes.append(len(np.where(index == idx)[0]))
        beta_var.append(Variable(len(np.where(index == idx)[0])))
    num_groups = len(group_sizes)
    group_lasso_penalization = 0
    model_prediction = x[:, np.where(index == unique_index[0])[0]] * beta_var[0]
    for i in range(1, num_groups):
        model_prediction += x[:, np.where(index == unique_index[i])[0]] * beta_var[i]
        group_lasso_penalization += sqrt(group_sizes[i]) * norm(beta_var[i], 2)
    lm_penalization = (1.0/n) * sum_squares(y - model_prediction)
    objective = Minimize(lm_penalization + (lambda_param * group_lasso_penalization))
    problem = Problem(objective)
    response = {'problem': problem, 'beta_var': beta_var, 'lambda_param': lambda_param, 'lambda_vector': lambda_vector}
    return response

def solver(problem, beta_var, lambda_param, lambda_vector):
    beta_sol_list = []
    for i in range(len(lambda_vector)):
        lambda_param.value = lambda_vector[i]
        problem.solve(solver=ECOS)
        beta_sol = np.asarray(np.row_stack([b.value for b in beta_var])).flatten()
        beta_sol_list.append(beta_sol)
    return beta_sol_list

def parallel_solver(problem, beta_var, lambda_param, lambda_vector):
    # Divide parameter vector into chunks to be executed in parallel
    num_chunks = mp.cpu_count()
    chunks = np.array_split(lambda_vector, num_chunks)
    # Solve problem in parallel
    pool = mp.Pool(num_chunks)
    global_results = pool.map(functools.partial(solver, problem, beta_var, lambda_param), chunks)
    pool.close()
    pool.join()
    return global_results

if __name__ == "__main__":
    boston = load_boston()
    x = boston.data
    y = boston.target
    index = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5])
    lambda1 = 1e-3
    r1 = lm_gl_preprocessing(x=x, y=y, index=index, lambda1=lambda1)
    s_parallel_1 = parallel_solver(problem=r1['problem'], beta_var=r1['beta_var'], lambda_param=r1['lambda_param'], lambda_vector=r1['lambda_vector'])
    print(s_parallel_1)
    r1 = lm_gl_preprocessing(x=x, y=y, index=index, lambda1=lambda1)
    s_1 = solver(problem=r1['problem'], beta_var=r1['beta_var'], lambda_param=r1['lambda_param'], lambda_vector=r1['lambda_vector'])
    print(s_1)
    print("#####################################################")
    r1 = lm_gl_preprocessing(x=x, y=y, index=index, lambda1=lambda1)
    s_1 = solver(problem=r1['problem'], beta_var=r1['beta_var'], lambda_param=r1['lambda_param'], lambda_vector=r1['lambda_vector'])
    print(s_1)
    r1 = lm_gl_preprocessing(x=x, y=y, index=index, lambda1=lambda1)
    s_parallel_1 = parallel_solver(problem=r1['problem'], beta_var=r1['beta_var'], lambda_param=r1['lambda_param'], lambda_vector=r1['lambda_vector'])
    print(s_parallel_1)
and this is the output:
[[array([ 3.62813738e+01, -1.06995338e-01, 4.64210526e-02, 1.97112192e-02,
2.68475527e+00, -1.75142155e+01, 3.80741843e+00, 5.14842823e-04,
-1.47105323e+00, 3.04949407e-01, -1.23508259e-02, -9.50143293e-01,
9.40708993e-03, -5.25758097e-01])], [], [], []]
[array([ 3.62813738e+01, -1.06995338e-01, 4.64210526e-02, 1.97112192e-02,
2.68475527e+00, -1.75142155e+01, 3.80741843e+00, 5.14842823e-04,
-1.47105323e+00, 3.04949407e-01, -1.23508259e-02, -9.50143293e-01,
9.40708993e-03, -5.25758097e-01])]
#####################################################
[array([ 3.62813738e+01, -1.06995338e-01, 4.64210526e-02, 1.97112192e-02,
2.68475527e+00, -1.75142155e+01, 3.80741843e+00, 5.14842823e-04,
-1.47105323e+00, 3.04949407e-01, -1.23508259e-02, -9.50143293e-01,
9.40708993e-03, -5.25758097e-01])]
[[array([ 3.62813738e+01, -1.06995338e-01, 4.64210526e-02, 1.97112192e-02,
2.68475527e+00, -1.75142155e+01, 3.80741843e+00, 5.14842823e-04,
-1.47105323e+00, 3.04949407e-01, -1.23508259e-02, -9.50143293e-01,
9.40708993e-03, -5.25758097e-01])], [], [], []]
As you can see from the four chunks in the parallel output, I have the same number of CPUs (4).
My environment is Python 2.7 on Linux, and these are the versions of the relevant packages:
>>> import sklearn
>>> sklearn.__version__
'0.19.2'
>>> import scipy
>>> scipy.__version__
'1.1.0'
>>> import numpy
>>> numpy.__version__
'1.15.2'
>>> import cvxpy
>>> cvxpy.__version__
'0.4.0'
>>> import multiprocessing
>>> multiprocessing.__version__
'0.70a1'
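As a caveat for anyone reproducing this on a newer stack: the code targets the cvxpy 0.4.x API. Under cvxpy 1.x the same constructs would be written roughly as follows (a sketch, not tested against this model, where x_group stands for the relevant column slice of x):

# cvxpy 1.x equivalents of the 0.4.x constructs used above
lambda_param = Parameter(nonneg=True)   # 0.4.x: Parameter(sign="positive")
prediction = x_group @ beta_var[i]      # 0.4.x: x_group * beta_var[i]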