module concrete.ml.sklearn.xgb
Implements XGBoost models.
class XGBClassifier
Implements the XGBoost classifier.
method __init__
__init__(
n_bits: int = 6,
max_depth: Optional[int] = 3,
learning_rate: Optional[float] = 0.1,
n_estimators: Optional[int] = 20,
objective: Optional[str] = 'binary:logistic',
booster: Optional[str] = None,
tree_method: Optional[str] = None,
n_jobs: Optional[int] = None,
gamma: Optional[float] = None,
min_child_weight: Optional[float] = None,
max_delta_step: Optional[float] = None,
subsample: Optional[float] = None,
colsample_bytree: Optional[float] = None,
colsample_bylevel: Optional[float] = None,
colsample_bynode: Optional[float] = None,
reg_alpha: Optional[float] = None,
reg_lambda: Optional[float] = None,
scale_pos_weight: Optional[float] = None,
base_score: Optional[float] = None,
missing: float = nan,
num_parallel_tree: Optional[int] = None,
monotone_constraints: Optional[Union[Dict[str, int], str]] = None,
interaction_constraints: Optional[Union[str, List[Tuple[str]]]] = None,
importance_type: Optional[str] = None,
gpu_id: Optional[int] = None,
validate_parameters: Optional[bool] = None,
predictor: Optional[str] = None,
enable_categorical: bool = False,
use_label_encoder: bool = False,
random_state: Optional[Union[RandomState, int]] = None,
verbosity: Optional[int] = None
)
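A minimal usage sketch (the toy data, the n_bits/n_estimators values, and the compile-then-predict flow are illustrative assumptions; the exact FHE-execution argument of predict varies across Concrete ML versions):
from concrete.ml.sklearn import XGBClassifier
import numpy
# Toy binary classification data (illustrative only)
X = numpy.random.rand(100, 4).astype(numpy.float32)
y = (X[:, 0] > 0.5).astype(numpy.int64)
# Quantized XGBoost classifier: n_bits controls the quantization precision
model = XGBClassifier(n_bits=6, n_estimators=20, max_depth=3)
model.fit(X, y)
# Compile to an FHE circuit, then run (clear) inference; FHE execution is
# requested through predict's FHE argument, whose name depends on the version
model.compile(X)
y_pred = model.predict(X)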
property onnx_model
Get the ONNX model.
Returns:
onnx.ModelProto: The ONNX model.
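For illustration, a sketch of inspecting this property on a fitted classifier (reusing the hypothetical model from the sketch above; the property is only populated once the model has been trained):
onnx_proto = model.onnx_model
# Standard onnx.ModelProto inspection
print(type(onnx_proto))
print([node.op_type for node in onnx_proto.graph.node])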
method post_processing
post_processing(y_preds: ndarray) → ndarray
Apply post-processing to the predictions.
Args:
y_preds (numpy.ndarray): The predictions.
Returns:
numpy.ndarray: The post-processed predictions.
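A hedged sketch of where post_processing sits in a client-side flow, reusing the hypothetical fitted model from above; raw_outputs is a made-up placeholder for de-quantized model outputs (for example, values decrypted after FHE inference), and its shape is illustrative only:
import numpy
# Placeholder raw outputs; in practice these come from the model's
# quantized / FHE inference path
raw_outputs = numpy.array([[0.25], [-1.5], [0.9]])
scores = model.post_processing(raw_outputs)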
class XGBRegressor
Implements the XGBoost regressor.
method __init__
__init__(
n_bits: int = 6,
max_depth: Optional[int] = 3,
learning_rate: Optional[float] = 0.1,
n_estimators: Optional[int] = 20,
objective: Optional[str] = 'reg:squarederror',
booster: Optional[str] = None,
tree_method: Optional[str] = None,
n_jobs: Optional[int] = None,
gamma: Optional[float] = None,
min_child_weight: Optional[float] = None,
max_delta_step: Optional[float] = None,
subsample: Optional[float] = None,
colsample_bytree: Optional[float] = None,
colsample_bylevel: Optional[float] = None,
colsample_bynode: Optional[float] = None,
reg_alpha: Optional[float] = None,
reg_lambda: Optional[float] = None,
scale_pos_weight: Optional[float] = None,
base_score: Optional[float] = None,
missing: float = nan,
num_parallel_tree: Optional[int] = None,
monotone_constraints: Optional[Union[Dict[str, int], str]] = None,
interaction_constraints: Optional[Union[str, List[Tuple[str]]]] = None,
importance_type: Optional[str] = None,
gpu_id: Optional[int] = None,
validate_parameters: Optional[bool] = None,
predictor: Optional[str] = None,
enable_categorical: bool = False,
use_label_encoder: bool = False,
random_state: Optional[Union[RandomState, int]] = None,
verbosity: Optional[int] = None
)
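A minimal usage sketch for the regressor, mirroring the classifier example above (data and hyper-parameters are illustrative assumptions):
from concrete.ml.sklearn import XGBRegressor
import numpy
# Toy regression data (illustrative only)
X = numpy.random.rand(100, 4).astype(numpy.float32)
y = 2.0 * X[:, 0] + X[:, 1]
reg = XGBRegressor(n_bits=6, n_estimators=20, max_depth=3)
reg.fit(X, y)
# Compile to an FHE circuit, then run inference
reg.compile(X)
y_pred = reg.predict(X)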
property onnx_model
Get the ONNX model.
Returns:
onnx.ModelProto: The ONNX model.
method fit
fit(X, y, **kwargs) → Any
Fit the tree-based estimator.
Args:
X: The training data. By default, you should be able to pass numpy arrays, torch tensors, or a pandas DataFrame or Series.
y (numpy.ndarray): The target data.
**kwargs: Arguments for super().fit.
Returns:
Any: The fitted estimator.
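A short sketch of the accepted input containers (reusing the hypothetical reg from the sketch above; the column names are made up):
import numpy
import pandas
X_np = numpy.random.rand(50, 3)
y_np = numpy.random.rand(50)
# numpy arrays
reg.fit(X_np, y_np)
# pandas DataFrame / Series carrying the same data
X_df = pandas.DataFrame(X_np, columns=["f0", "f1", "f2"])
y_series = pandas.Series(y_np)
reg.fit(X_df, y_series)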
method post_processing
post_processing(y_preds: ndarray) → ndarray
Apply post-processing to the predictions.
Args:
y_preds (numpy.ndarray): The predictions.
Returns:
numpy.ndarray: The post-processed predictions.