Objective: predict class probabilities with sklearn when the model inputs are a mix of int, float, and object columns (according to a pandas DataFrame).
I am using the Auto Dataset from the UCI Repository.
I have created a pipeline which almost works:
# Create transformers for the different variable types.
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import pandas as pd
import numpy as np

data = pd.read_csv(r"C:\Auto Dataset.csv")

target = 'aspiration'
X = data.drop([target], axis=1)
y = data[target]

integer_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('scaler', StandardScaler())])

continuous_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('scaler', StandardScaler())])

categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('lab_enc', OneHotEncoder(handle_unknown='ignore'))])
# Use the ColumnTransformer to apply the transformations to the correct columns in the dataframe.
integer_features = X.select_dtypes(include=['int64'])
continuous_features = X.select_dtypes(include=['float64'])
categorical_features = X.select_dtypes(include=['object'])

from sklearn.compose import ColumnTransformer

preprocessor = ColumnTransformer(
    transformers=[
        ('ints', integer_transformer, integer_features),
        ('cont', continuous_transformer, continuous_features),
        ('cat', categorical_transformer, categorical_features)])
# Create a pipeline that combines the preprocessor created above with a classifier.
from sklearn.neighbors import KNeighborsClassifier

base = Pipeline(steps=[('preprocessor', preprocessor),
                       ('classifier', KNeighborsClassifier())])
Of course, I would like to make use of predict_proba(), which ends up giving me a bit of trouble. I tried the following:
model = base.fit(X, y)
preds = model.predict_proba(X)
However, I received an error:
ValueError: No valid specification of the columns. Only a scalar, list or slice of all integers or all strings, or boolean mask is allowed
Here is the full traceback:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-a1a29a8b3623> in <module>()
----> 1 base_learner.fit(X)
D:\Anaconda3\lib\site-packages\sklearn\pipeline.py in fit(self, X, y, **fit_params)
263 This estimator
264 """
--> 265 Xt, fit_params = self._fit(X, y, **fit_params)
266 if self._final_estimator is not None:
267 self._final_estimator.fit(Xt, y, **fit_params)
D:\Anaconda3\lib\site-packages\sklearn\pipeline.py in _fit(self, X, y, **fit_params)
228 Xt, fitted_transformer = fit_transform_one_cached(
229 cloned_transformer, Xt, y, None,
--> 230 **fit_params_steps[name])
231 # Replace the transformer of the step with the fitted
232 # transformer. This is necessary when loading the transformer
D:\Anaconda3\lib\site-packages\sklearn\externals\joblib\memory.py in __call__(self, *args, **kwargs)
327
328 def __call__(self, *args, **kwargs):
--> 329 return self.func(*args, **kwargs)
330
331 def call_and_shelve(self, *args, **kwargs):
D:\Anaconda3\lib\site-packages\sklearn\pipeline.py in _fit_transform_one(transformer, X, y, weight, **fit_params)
612 def _fit_transform_one(transformer, X, y, weight, **fit_params):
613 if hasattr(transformer, 'fit_transform'):
--> 614 res = transformer.fit_transform(X, y, **fit_params)
615 else:
616 res = transformer.fit(X, y, **fit_params).transform(X)
D:\Anaconda3\lib\site-packages\sklearn\compose\_column_transformer.py in fit_transform(self, X, y)
445 self._validate_transformers()
446 self._validate_column_callables(X)
--> 447 self._validate_remainder(X)
448
449 result = self._fit_transform(X, y, _fit_transform_one)
D:\Anaconda3\lib\site-packages\sklearn\compose\_column_transformer.py in _validate_remainder(self, X)
299 cols = []
300 for columns in self._columns:
--> 301 cols.extend(_get_column_indices(X, columns))
302 remaining_idx = sorted(list(set(range(n_columns)) - set(cols))) or None
303
D:\Anaconda3\lib\site-packages\sklearn\compose\_column_transformer.py in _get_column_indices(X, key)
654 return list(np.arange(n_columns)[key])
655 else:
--> 656 raise ValueError("No valid specification of the columns. Only a "
657 "scalar, list or slice of all integers or all "
658 "strings, or boolean mask is allowed")
Not sure what I am missing, but I would appreciate any help.
EDIT: I am using sklearn version 0.20.
The error message points you in the right direction: the columns should be specified by name or index, but you are passing them as a DataFrame.
df.select_dtypes()
doesn't output column indices. It outputs a subset of the DataFrame with the matched columns. Your code should be
# Use the ColumnTransformer to apply the transformations to the correct columns in the dataframe.
integer_features = list(X.columns[X.dtypes == 'int64'])
continuous_features = list(X.columns[X.dtypes == 'float64'])
categorical_features = list(X.columns[X.dtypes == 'object'])
so that, for example, the integer columns get passed on as a list of column names: ['curb-weight', 'engine-size', 'city-mpg', 'highway-mpg']
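For completeness, here is a minimal sketch of how the corrected lists slot back into the rest of the pipeline. It simply reuses the transformers, classifier, and X/y from the question; nothing else needs to change:
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.neighbors import KNeighborsClassifier

# Pass lists of column names (not DataFrame subsets) to ColumnTransformer.
preprocessor = ColumnTransformer(
    transformers=[
        ('ints', integer_transformer, integer_features),
        ('cont', continuous_transformer, continuous_features),
        ('cat', categorical_transformer, categorical_features)])

base = Pipeline(steps=[('preprocessor', preprocessor),
                       ('classifier', KNeighborsClassifier())])

model = base.fit(X, y)            # now fits without the ValueError
preds = model.predict_proba(X)    # array of shape (n_samples, n_classes)
As a side note, on sklearn 0.22 and later you can let sklearn.compose.make_column_selector(dtype_include=...) do this dtype-based selection for you, but on 0.20 the explicit lists above are the way to go.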