Skip to content

elapid.features

Functions to transform covariate data into complex model features.

CategoricalTransformer

Bases: BaseEstimator, TransformerMixin

Applies one-hot encoding to categorical covariate datasets.

Source code in elapid/features.py
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
class CategoricalTransformer(BaseEstimator, TransformerMixin):
    """Applies one-hot encoding to categorical covariate datasets."""

    def __init__(self):
        # list with one fitted OneHotEncoder per input column, set by fit()
        self.estimators_ = None

    @staticmethod
    def _onehot_encoder() -> "OneHotEncoder":
        """Create a dense uint8 one-hot encoder across sklearn versions."""
        # sklearn 1.2 renamed `sparse` to `sparse_output` and removed the old
        # keyword in 1.4, so prefer the new name and fall back for old versions
        try:
            return OneHotEncoder(dtype=np.uint8, sparse_output=False)
        except TypeError:
            return OneHotEncoder(dtype=np.uint8, sparse=False)

    def fit(self, x: ArrayLike) -> "CategoricalTransformer":
        """Fit one one-hot encoder per categorical covariate column.

        Args:
            x: array-like of shape (n_samples, n_features)
                The categorical data used to learn the unique classes
                of each feature column.

        Returns:
            self. Returns the transformer with fitted parameters.
        """
        self.estimators_ = []
        x = np.array(x)
        if x.ndim == 1:
            # a 1-d array is treated as a single categorical column
            self.estimators_.append(self._onehot_encoder().fit(x.reshape(-1, 1)))
        else:
            # fit each column independently so class sets stay separate
            for col in range(x.shape[1]):
                xsub = x[:, col].reshape(-1, 1)
                self.estimators_.append(self._onehot_encoder().fit(xsub))

        return self

    def transform(self, x: ArrayLike) -> np.ndarray:
        """One-hot encode covariates using the fitted per-column encoders.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data that will be transformed. Must have the same
                number of columns as the data passed to fit().

        Returns:
            ndarray with the encoded columns concatenated along axis 1.
        """
        x = np.array(x)
        if x.ndim == 1:
            return self.estimators_[0].transform(x.reshape(-1, 1))
        else:
            class_data = []
            for col in range(x.shape[1]):
                xsub = x[:, col].reshape(-1, 1)
                class_data.append(self.estimators_[col].transform(xsub))
            return np.concatenate(class_data, axis=1)

    def fit_transform(self, x: ArrayLike) -> np.ndarray:
        """Fits the encoders to x and returns one-hot encoded features.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data to fit the encoders and to transform.

        Returns:
            ndarray with transformed data.
        """
        self.fit(x)
        return self.transform(x)

fit(x)

Fit one one-hot encoder per categorical covariate column.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) The categorical data used to learn the unique classes of each feature column.

required

Returns:

Type Description
CategoricalTransformer

self. Returns the transformer with fitted parameters.

Source code in elapid/features.py
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
def fit(self, x: ArrayLike) -> "CategoricalTransformer":
    """Fit one one-hot encoder per categorical covariate column.

    Args:
        x: array-like of shape (n_samples, n_features)
            The categorical data used to learn the unique classes
            of each feature column.

    Returns:
        self. Returns the transformer with fitted parameters.
    """
    self.estimators_ = []
    x = np.array(x)
    if x.ndim == 1:
        # a 1-d array is treated as a single categorical column
        estimator = OneHotEncoder(dtype=np.uint8, sparse=False)
        self.estimators_.append(estimator.fit(x.reshape(-1, 1)))
    else:
        # fit each column independently so class sets stay separate
        nrows, ncols = x.shape
        for col in range(ncols):
            xsub = x[:, col].reshape(-1, 1)
            estimator = OneHotEncoder(dtype=np.uint8, sparse=False)
            self.estimators_.append(estimator.fit(xsub))

    return self

fit_transform(x)

Fits scaler to x and returns transformed features.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data to fit the scaler and to transform.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
386
387
388
389
390
391
392
393
394
395
396
397
def fit_transform(self, x: ArrayLike) -> np.ndarray:
    """Fits the encoders to x and returns one-hot encoded features.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data to fit the encoders and to transform.

    Returns:
        ndarray with transformed data.
    """
    self.fit(x)
    return self.transform(x)

transform(x)

One-hot encode covariates using the fitted per-column encoders.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data that will be transformed.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
def transform(self, x: ArrayLike) -> np.ndarray:
    """One-hot encode covariates using the fitted per-column encoders.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data that will be transformed. Must have the same
            number of columns as the data passed to fit().

    Returns:
        ndarray with the encoded columns concatenated along axis 1.
    """
    x = np.array(x)
    if x.ndim == 1:
        estimator = self.estimators_[0]
        return estimator.transform(x.reshape(-1, 1))
    else:
        class_data = []
        nrows, ncols = x.shape
        for col in range(ncols):
            xsub = x[:, col].reshape(-1, 1)
            estimator = self.estimators_[col]
            class_data.append(estimator.transform(xsub))
        return np.concatenate(class_data, axis=1)

CumulativeTransformer

Bases: QuantileTransformer

Applies a percentile-based transform to estimate cumulative suitability.

Source code in elapid/features.py
400
401
402
403
404
class CumulativeTransformer(QuantileTransformer):
    """Applies a percentile-based transform to estimate cumulative suitability."""

    def __init__(self):
        # fixed configuration: 100 quantile bins mapped onto a uniform 0-1
        # distribution, i.e. each value becomes its percentile rank
        super().__init__(output_distribution="uniform", n_quantiles=100)

FeaturesMixin

Methods for formatting x data and labels

Source code in elapid/features.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
class FeaturesMixin:
    """Methods for formatting x data and labels"""

    def _format_covariate_data(self, x: ArrayLike) -> Tuple[np.array, np.array]:
        """Reads input x data and formats it to consistent array dtypes.

        Args:
            x: array-like of shape (n_samples, n_features)

        Returns:
            (continuous, categorical) tuple of ndarrays with continuous and
                categorical covariate data. categorical is None when no
                categorical columns were identified during fit.

        Raises:
            TypeError: if x is neither an ndarray nor a DataFrame.
        """
        if isinstance(x, np.ndarray):
            if self.categorical_ is None:
                con = x
                cat = None
            else:
                # split columns by the indices stored in _format_labels_and_dtypes
                con = x[:, self.continuous_]
                cat = x[:, self.categorical_]

        elif isinstance(x, pd.DataFrame):
            con = x[self.continuous_pd_].to_numpy()
            if len(self.categorical_pd_) > 0:
                cat = x[self.categorical_pd_].to_numpy()
            else:
                cat = None

        else:
            raise TypeError(f"Unsupported x dtype: {type(x)}. Must be pd.DataFrame or np.array")

        return con, cat

    def _format_labels_and_dtypes(self, x: ArrayLike, categorical: list = None, labels: list = None) -> None:
        """Read input x data and lists of categorical data indices and band
            labels to format and store this info for later indexing.

        Args:
            x: array-like of shape (n_samples, n_features)
            categorical: indices indicating which x columns are categorical
            labels: covariate column labels. ignored if x is a pandas DataFrame
        """
        if isinstance(x, np.ndarray):
            nrows, ncols = x.shape
            if categorical is None:
                continuous = list(range(ncols))
            else:
                # keep a deterministic, ascending column order (the previous
                # set-difference relied on unspecified set iteration order)
                exclude = set(categorical)
                continuous = [idx for idx in range(ncols) if idx not in exclude]
            self.labels_ = labels or make_band_labels(ncols)
            self.categorical_ = categorical
            self.continuous_ = continuous

        elif isinstance(x, pd.DataFrame):
            # drop geometry columns on a copy: dropping with inplace=True
            # mutated the caller's (Geo)DataFrame as a side effect
            x = x.drop(["geometry"], axis=1, errors="ignore")
            self.labels_ = labels or list(x.columns)

            # store both pandas and numpy indexing of these values
            self.continuous_pd_ = list(x.select_dtypes(exclude="category").columns)
            self.categorical_pd_ = list(x.select_dtypes(include="category").columns)

            all_columns = list(x.columns)
            self.continuous_ = [all_columns.index(item) for item in self.continuous_pd_ if item in all_columns]
            if len(self.categorical_pd_) != 0:
                self.categorical_ = [all_columns.index(item) for item in self.categorical_pd_ if item in all_columns]
            else:
                self.categorical_ = None

HingeTransformer

Bases: BaseEstimator, TransformerMixin

Fits hinge transformations to an array of covariates.

Source code in elapid/features.py
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
class HingeTransformer(BaseEstimator, TransformerMixin):
    """Fits hinge transformations to an array of covariates."""

    def __init__(self, n_hinges: int = MaxentConfig.n_hinge_features):
        # number of hinge knots per feature; knot locations are set by fit()
        self.n_hinges = n_hinges
        self.mins_ = None
        self.maxs_ = None
        self.hinge_indices_ = None

    def fit(self, x: ArrayLike) -> "HingeTransformer":
        """Compute per-feature minimums, maximums and hinge knot locations.

        Args:
            x: array-like of shape (n_samples, n_features)
                The data used to compute the per-feature minimum and maximum
                used for later scaling along the features axis.

        Returns:
            self. Updated transformer with fitted parameters.
        """
        x = np.array(x)
        self.mins_ = x.min(axis=0)
        self.maxs_ = x.max(axis=0)
        # evenly-spaced knots from each feature's min to its max,
        # shape (n_hinges, n_features)
        self.hinge_indices_ = np.linspace(self.mins_, self.maxs_, self.n_hinges)

        return self

    def transform(self, x: ArrayLike) -> np.ndarray:
        """Compute left- and right-hinge features at each fitted knot.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data that will be transformed.

        Returns:
            ndarray with transformed data of shape
                (n_samples, (n_hinges - 1) * 2 * n_features).
        """
        x = np.array(x)
        # replicate x once per hinge interval so knots broadcast against it
        xarr = repeat_array(x, self.n_hinges - 1, axis=-1)
        # left hinges use all knots but the last; right hinges all but the first
        lharr = repeat_array(self.hinge_indices_[:-1].transpose(), len(x), axis=0)
        rharr = repeat_array(self.hinge_indices_[1:].transpose(), len(x), axis=0)
        lh = left_hinge(xarr, lharr, self.maxs_)
        rh = right_hinge(xarr, self.mins_, rharr)
        # flatten the per-feature hinge axis into a 2-d feature matrix
        return np.concatenate((lh, rh), axis=2).reshape(x.shape[0], -1)

    def fit_transform(self, x: ArrayLike) -> np.ndarray:
        """Fits hinge knots to x and returns the hinge features.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data to fit the scaler and to transform.

        Returns:
            ndarray with transformed data.
        """
        self.fit(x)
        return self.transform(x)

fit(x)

Compute the minimum and maximum for scaling.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis.

required

Returns:

Type Description
HingeTransformer

self. Updated transformer with fitted parameters.

Source code in elapid/features.py
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
def fit(self, x: ArrayLike) -> "HingeTransformer":
    """Compute per-feature minimums, maximums and hinge knot locations.

    Args:
        x: array-like of shape (n_samples, n_features)
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.

    Returns:
        self. Updated transformer with fitted parameters.
    """
    x = np.array(x)
    self.mins_ = x.min(axis=0)
    self.maxs_ = x.max(axis=0)
    # evenly-spaced knots from each feature's min to its max
    self.hinge_indices_ = np.linspace(self.mins_, self.maxs_, self.n_hinges)

    return self

fit_transform(x)

Fits scaler to x and returns transformed features.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data to fit the scaler and to transform.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
318
319
320
321
322
323
324
325
326
327
328
329
def fit_transform(self, x: ArrayLike) -> np.ndarray:
    """Fits hinge knots to x and returns the hinge features.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data to fit the scaler and to transform.

    Returns:
        ndarray with transformed data.
    """
    self.fit(x)
    return self.transform(x)

transform(x)

Scale covariates according to the feature range.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data that will be transformed.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
def transform(self, x: ArrayLike) -> np.ndarray:
    """Compute left- and right-hinge features at each fitted knot.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data that will be transformed.

    Returns:
        ndarray with transformed data of shape
            (n_samples, (n_hinges - 1) * 2 * n_features).
    """
    x = np.array(x)
    # replicate x once per hinge interval so knots broadcast against it
    xarr = repeat_array(x, self.n_hinges - 1, axis=-1)
    # left hinges use all knots but the last; right hinges all but the first
    lharr = repeat_array(self.hinge_indices_[:-1].transpose(), len(x), axis=0)
    rharr = repeat_array(self.hinge_indices_[1:].transpose(), len(x), axis=0)
    lh = left_hinge(xarr, lharr, self.maxs_)
    rh = right_hinge(xarr, self.mins_, rharr)
    # flatten the per-feature hinge axis into a 2-d feature matrix
    return np.concatenate((lh, rh), axis=2).reshape(x.shape[0], -1)

LinearTransformer

Bases: MinMaxScaler

Applies linear feature transformations to rescale features from 0-1.

Source code in elapid/features.py
83
84
85
86
87
88
89
90
91
92
93
class LinearTransformer(MinMaxScaler):
    """Applies linear feature transformations to rescale features from 0-1."""

    def __init__(
        self,
        clamp: bool = MaxentConfig.clamp,
        feature_range: Tuple[float, float] = (0.0, 1.0),
    ):
        # store the constructor params under their elapid names so sklearn's
        # get_params()/set_params() round-trip works on this subclass
        self.clamp = clamp
        self.feature_range = feature_range
        # clip=clamp pins transformed values inside feature_range at predict time
        super().__init__(feature_range=feature_range, clip=clamp)

MaxentFeatureTransformer

Bases: BaseEstimator, TransformerMixin, FeaturesMixin

Transforms covariate data into maxent-format feature data.

Source code in elapid/features.py
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
class MaxentFeatureTransformer(BaseEstimator, TransformerMixin, FeaturesMixin):
    """Transforms covariate data into maxent-format feature data."""

    def __init__(
        self,
        feature_types: Union[str, list] = MaxentConfig.feature_types,
        clamp: bool = MaxentConfig.clamp,
        n_hinge_features: int = MaxentConfig.n_hinge_features,
        n_threshold_features: int = MaxentConfig.n_threshold_features,
    ):
        """Computes features based on the maxent feature types specified (like linear, quadratic, hinge).

        Args:
            feature_types: list of maxent features to generate.
            clamp: set feature values to global mins/maxs during prediction
            n_hinge_features: number of hinge knots to generate
            n_threshold_features: number of threshold features to generate
        """
        self.feature_types = feature_types
        self.clamp = clamp
        self.n_hinge_features = n_hinge_features
        self.n_threshold_features = n_threshold_features
        # column bookkeeping (numpy indices and pandas labels), set by fit()
        self.categorical_ = None
        self.continuous_ = None
        self.categorical_pd_ = None
        self.continuous_pd_ = None
        self.labels_ = None
        self.feature_names_ = None
        # one fitted transformer per maxent feature type, populated by fit()
        self.estimators_ = {
            "linear": None,
            "quadratic": None,
            "product": None,
            "threshold": None,
            "hinge": None,
            "categorical": None,
        }

    def fit(self, x: ArrayLike, categorical: list = None, labels: list = None) -> "MaxentFeatureTransformer":
        """Fit one transformer for each configured maxent feature type.

        Args:
            x: array-like of shape (n_samples, n_features)
                The data used to compute the per-feature minimum and maximum
                used for later scaling along the features axis.
            categorical: indices indicating which x columns are categorical
            labels: covariate column labels. ignored if x is a pandas DataFrame

        Returns:
            self. Returns the transformer with fitted parameters.
        """
        # validate and normalize user-configurable parameters up front
        self.feature_types = validate_feature_types(self.feature_types)
        self.clamp = validate_boolean(self.clamp)
        self.n_hinge_features = validate_numeric_scalar(self.n_hinge_features)
        self.n_threshold_features = validate_numeric_scalar(self.n_threshold_features)

        # split covariates into continuous and categorical arrays
        self._format_labels_and_dtypes(x, categorical=categorical, labels=labels)
        con, cat = self._format_covariate_data(x)
        nrows, ncols = con.shape

        # feature_names tracks which output column belongs to which feature type
        feature_names = []
        if "linear" in self.feature_types:
            estimator = LinearTransformer(clamp=self.clamp)
            estimator.fit(con)
            self.estimators_["linear"] = estimator
            feature_names += ["linear"] * estimator.n_features_in_

        if "quadratic" in self.feature_types:
            estimator = QuadraticTransformer(clamp=self.clamp)
            estimator.fit(con)
            self.estimators_["quadratic"] = estimator
            feature_names += ["quadratic"] * estimator.estimator.n_features_in_

        if "product" in self.feature_types:
            estimator = ProductTransformer(clamp=self.clamp)
            estimator.fit(con)
            self.estimators_["product"] = estimator
            feature_names += ["product"] * estimator.estimator.n_features_in_

        if "threshold" in self.feature_types:
            estimator = ThresholdTransformer(n_thresholds=self.n_threshold_features)
            estimator.fit(con)
            self.estimators_["threshold"] = estimator
            # one threshold feature per threshold per continuous column
            feature_names += ["threshold"] * (estimator.n_thresholds * ncols)

        if "hinge" in self.feature_types:
            estimator = HingeTransformer(n_hinges=self.n_hinge_features)
            estimator.fit(con)
            self.estimators_["hinge"] = estimator
            # left and right hinges for each knot interval of each column
            feature_names += ["hinge"] * ((estimator.n_hinges - 1) * 2 * ncols)

        if cat is not None:
            estimator = CategoricalTransformer()
            estimator.fit(cat)
            self.estimators_["categorical"] = estimator
            # one output column per class of each categorical feature
            for est in estimator.estimators_:
                feature_names += ["categorical"] * len(est.categories_[0])

        self.feature_names_ = feature_names

        return self

    def transform(self, x: ArrayLike) -> np.ndarray:
        """Transform covariates with each fitted feature-type transformer.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data that will be transformed.

        Returns:
            ndarray with transformed data.
        """
        con, cat = self._format_covariate_data(x)
        features = []

        # append in the same order fit() used so columns match feature_names_
        if "linear" in self.feature_types:
            features.append(self.estimators_["linear"].transform(con))

        if "quadratic" in self.feature_types:
            features.append(self.estimators_["quadratic"].transform(con))

        if "product" in self.feature_types:
            features.append(self.estimators_["product"].transform(con))

        if "threshold" in self.feature_types:
            features.append(self.estimators_["threshold"].transform(con))

        if "hinge" in self.feature_types:
            features.append(self.estimators_["hinge"].transform(con))

        if cat is not None:
            features.append(self.estimators_["categorical"].transform(cat))

        return np.concatenate(features, axis=1)

    def fit_transform(self, x: ArrayLike, categorical: list = None, labels: list = None) -> np.ndarray:
        """Fits all feature transformers to x and returns transformed features.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data to fit the scaler and to transform.
            categorical: indices indicating which x columns are categorical
            labels: covariate column labels. ignored if x is a pandas DataFrame

        Returns:
            ndarray with transformed data.
        """
        self.fit(x, categorical=categorical, labels=labels)
        return self.transform(x)

__init__(feature_types=MaxentConfig.feature_types, clamp=MaxentConfig.clamp, n_hinge_features=MaxentConfig.n_hinge_features, n_threshold_features=MaxentConfig.n_threshold_features)

Computes features based on the maxent feature types specified (like linear, quadratic, hinge).

Parameters:

Name Type Description Default
feature_types Union[str, list]

list of maxent features to generate.

MaxentConfig.feature_types
clamp bool

set feature values to global mins/maxs during prediction

MaxentConfig.clamp
n_hinge_features int

number of hinge knots to generate

MaxentConfig.n_hinge_features
n_threshold_features int

number of threshold features to generate

MaxentConfig.n_threshold_features
Source code in elapid/features.py
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
def __init__(
    self,
    feature_types: Union[str, list] = MaxentConfig.feature_types,
    clamp: bool = MaxentConfig.clamp,
    n_hinge_features: int = MaxentConfig.n_hinge_features,
    n_threshold_features: int = MaxentConfig.n_threshold_features,
):
    """Computes features based on the maxent feature types specified (like linear, quadratic, hinge).

    Args:
        feature_types: list of maxent features to generate.
        clamp: set feature values to global mins/maxs during prediction
        n_hinge_features: number of hinge knots to generate
        n_threshold_features: number of threshold features to generate
    """
    self.feature_types = feature_types
    self.clamp = clamp
    self.n_hinge_features = n_hinge_features
    self.n_threshold_features = n_threshold_features
    # column bookkeeping (numpy indices and pandas labels), set by fit()
    self.categorical_ = None
    self.continuous_ = None
    self.categorical_pd_ = None
    self.continuous_pd_ = None
    self.labels_ = None
    self.feature_names_ = None
    # one fitted transformer per maxent feature type, populated by fit()
    self.estimators_ = {
        "linear": None,
        "quadratic": None,
        "product": None,
        "threshold": None,
        "hinge": None,
        "categorical": None,
    }

fit(x, categorical=None, labels=None)

Compute the minimum and maximum for scaling.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis.

required
categorical list

indices indicating which x columns are categorical

None
labels list

covariate column labels. ignored if x is a pandas DataFrame

None

Returns:

Type Description
MaxentFeatureTransformer

self. Returns the transformer with fitted parameters.

Source code in elapid/features.py
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
def fit(self, x: ArrayLike, categorical: list = None, labels: list = None) -> "MaxentFeatureTransformer":
    """Fit one transformer for each configured maxent feature type.

    Args:
        x: array-like of shape (n_samples, n_features)
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        categorical: indices indicating which x columns are categorical
        labels: covariate column labels. ignored if x is a pandas DataFrame

    Returns:
        self. Returns the transformer with fitted parameters.
    """
    # validate and normalize user-configurable parameters up front
    self.feature_types = validate_feature_types(self.feature_types)
    self.clamp = validate_boolean(self.clamp)
    self.n_hinge_features = validate_numeric_scalar(self.n_hinge_features)
    self.n_threshold_features = validate_numeric_scalar(self.n_threshold_features)

    # split covariates into continuous and categorical arrays
    self._format_labels_and_dtypes(x, categorical=categorical, labels=labels)
    con, cat = self._format_covariate_data(x)
    nrows, ncols = con.shape

    # feature_names tracks which output column belongs to which feature type
    feature_names = []
    if "linear" in self.feature_types:
        estimator = LinearTransformer(clamp=self.clamp)
        estimator.fit(con)
        self.estimators_["linear"] = estimator
        feature_names += ["linear"] * estimator.n_features_in_

    if "quadratic" in self.feature_types:
        estimator = QuadraticTransformer(clamp=self.clamp)
        estimator.fit(con)
        self.estimators_["quadratic"] = estimator
        feature_names += ["quadratic"] * estimator.estimator.n_features_in_

    if "product" in self.feature_types:
        estimator = ProductTransformer(clamp=self.clamp)
        estimator.fit(con)
        self.estimators_["product"] = estimator
        feature_names += ["product"] * estimator.estimator.n_features_in_

    if "threshold" in self.feature_types:
        estimator = ThresholdTransformer(n_thresholds=self.n_threshold_features)
        estimator.fit(con)
        self.estimators_["threshold"] = estimator
        # one threshold feature per threshold per continuous column
        feature_names += ["threshold"] * (estimator.n_thresholds * ncols)

    if "hinge" in self.feature_types:
        estimator = HingeTransformer(n_hinges=self.n_hinge_features)
        estimator.fit(con)
        self.estimators_["hinge"] = estimator
        # left and right hinges for each knot interval of each column
        feature_names += ["hinge"] * ((estimator.n_hinges - 1) * 2 * ncols)

    if cat is not None:
        estimator = CategoricalTransformer()
        estimator.fit(cat)
        self.estimators_["categorical"] = estimator
        # one output column per class of each categorical feature
        for est in estimator.estimators_:
            feature_names += ["categorical"] * len(est.categories_[0])

    self.feature_names_ = feature_names

    return self

fit_transform(x, categorical=None, labels=None)

Fits scaler to x and returns transformed features.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data to fit the scaler and to transform.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
541
542
543
544
545
546
547
548
549
550
551
552
def fit_transform(self, x: ArrayLike, categorical: list = None, labels: list = None) -> np.ndarray:
    """Fits all feature transformers to x and returns transformed features.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data to fit the scaler and to transform.
        categorical: indices indicating which x columns are categorical
        labels: covariate column labels. ignored if x is a pandas DataFrame

    Returns:
        ndarray with transformed data.
    """
    self.fit(x, categorical=categorical, labels=labels)
    return self.transform(x)

transform(x)

Scale covariates according to the feature range.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data that will be transformed.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
def transform(self, x: ArrayLike) -> np.ndarray:
    """Transform covariates with each fitted feature-type transformer.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data that will be transformed.

    Returns:
        ndarray with transformed data.
    """
    con, cat = self._format_covariate_data(x)
    features = []

    # append in the same order fit() used so columns match feature_names_
    if "linear" in self.feature_types:
        features.append(self.estimators_["linear"].transform(con))

    if "quadratic" in self.feature_types:
        features.append(self.estimators_["quadratic"].transform(con))

    if "product" in self.feature_types:
        features.append(self.estimators_["product"].transform(con))

    if "threshold" in self.feature_types:
        features.append(self.estimators_["threshold"].transform(con))

    if "hinge" in self.feature_types:
        features.append(self.estimators_["hinge"].transform(con))

    if cat is not None:
        features.append(self.estimators_["categorical"].transform(cat))

    return np.concatenate(features, axis=1)

ProductTransformer

Bases: BaseEstimator, TransformerMixin

Computes the column-wise product of an array of input features, rescaling from 0-1.

Source code in elapid/features.py
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
class ProductTransformer(BaseEstimator, TransformerMixin):
    """Computes the column-wise product of an array of input features, rescaling from 0-1."""

    def __init__(
        self,
        clamp: bool = MaxentConfig.clamp,
        feature_range: Tuple[float, float] = (0.0, 1.0),
    ):
        self.clamp = clamp
        self.feature_range = feature_range
        # min-max scaler over the column products, created during fit()
        self.estimator = None

    def fit(self, x: ArrayLike) -> "ProductTransformer":
        """Fit a min-max scaler over the column-wise products of x.

        Args:
            x: array-like of shape (n_samples, n_features)
                The data used to compute the per-feature minimum and maximum
                used for later scaling along the features axis.

        Returns:
            self. Returns the transformer with fitted parameters.
        """
        products = column_product(np.array(x))
        scaler = MinMaxScaler(clip=self.clamp, feature_range=self.feature_range)
        self.estimator = scaler.fit(products)
        return self

    def transform(self, x: ArrayLike) -> np.ndarray:
        """Compute column products and rescale them to the feature range.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data that will be transformed.

        Returns:
            ndarray with transformed data.
        """
        products = column_product(np.array(x))
        return self.estimator.transform(products)

    def fit_transform(self, x: ArrayLike) -> np.ndarray:
        """Fits scaler to x and returns transformed features.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data to fit the scaler and to transform.

        Returns:
            ndarray with transformed data.
        """
        return self.fit(x).transform(x)

fit(x)

Compute the minimum and maximum for scaling.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis.

required

Returns:

Type Description
ProductTransformer

self. Returns the transformer with fitted parameters.

Source code in elapid/features.py
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
def fit(self, x: ArrayLike) -> "ProductTransformer":
    """Compute the minimum and maximum for scaling.

    Args:
        x: array-like of shape (n_samples, n_features)
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.

    Returns:
        self. Returns the transformer with fitted parameters.
    """
    # fit the min-max scaler on the column-wise products of the covariates,
    # so the product features are later rescaled to self.feature_range
    self.estimator = MinMaxScaler(clip=self.clamp, feature_range=self.feature_range)
    self.estimator.fit(column_product(np.array(x)))

    return self

fit_transform(x)

Fits scaler to x and returns transformed features.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data to fit the scaler and to transform.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
202
203
204
205
206
207
208
209
210
211
212
213
def fit_transform(self, x: ArrayLike) -> np.ndarray:
    """Fits scaler to x and returns transformed features.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data to fit the scaler and to transform.

    Returns:
        ndarray with transformed data.
    """
    # fit the scaler first, then transform the same covariates in one call
    self.fit(x)
    return self.transform(x)

transform(x)

Scale covariates according to the feature range.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data that will be transformed.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
190
191
192
193
194
195
196
197
198
199
200
def transform(self, x: ArrayLike) -> np.ndarray:
    """Scale covariates according to the feature range.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data that will be transformed.

    Returns:
        ndarray with transformed data.
    """
    # rescale the column-wise covariate products with the fitted estimator
    return self.estimator.transform(column_product(np.array(x)))

QuadraticTransformer

Bases: BaseEstimator, TransformerMixin

Applies quadratic feature transformations and rescales features from 0-1.

Source code in elapid/features.py
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
class QuadraticTransformer(BaseEstimator, TransformerMixin):
    """Applies quadratic feature transformations and rescales features from 0-1."""

    def __init__(
        self,
        clamp: bool = MaxentConfig.clamp,
        feature_range: Tuple[float, float] = (0.0, 1.0),
    ):
        self.clamp = clamp
        self.feature_range = feature_range
        self.estimator = None

    def fit(self, x: ArrayLike) -> "QuadraticTransformer":
        """Compute the minimum and maximum for scaling.

        Args:
            x: array-like of shape (n_samples, n_features)
                The data used to compute the per-feature minimum and maximum
                used for later scaling along the features axis.

        Returns:
            self. Returns the transformer with fitted parameters.
        """
        # the scaler is fit to the squared covariates, not the raw values
        self.estimator = MinMaxScaler(clip=self.clamp, feature_range=self.feature_range)
        self.estimator.fit(np.array(x) ** 2)

        return self

    def transform(self, x: ArrayLike) -> np.ndarray:
        """Scale covariates according to the feature range.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data that will be transformed.

        Returns:
            ndarray with transformed data.
        """
        return self.estimator.transform(np.array(x) ** 2)

    def fit_transform(self, x: ArrayLike) -> np.ndarray:
        """Fits scaler to x and returns transformed features.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data to fit the scaler and to transform.

        Returns:
            ndarray with transformed data.
        """
        # delegate to transform() so the squaring logic lives in one place,
        # matching the other transformers in this module
        self.fit(x)
        return self.transform(x)

    def inverse_transform(self, x: ArrayLike) -> np.ndarray:
        """Revert from transformed features to original covariate values.

        Note: the square root returns magnitudes only, so covariates that
        were originally negative come back as their absolute values.

        Args:
            x: array-like of shape (n_samples, n_features)
                Transformed feature data to convert to covariate data.

        Returns:
            ndarray with unscaled covariate values.
        """
        return self.estimator.inverse_transform(np.array(x)) ** 0.5

fit(x)

Compute the minimum and maximum for scaling.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis.

required

Returns:

Type Description
QuadraticTransformer

self. Returns the transformer with fitted parameters.

Source code in elapid/features.py
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
def fit(self, x: ArrayLike) -> "QuadraticTransformer":
    """Compute the minimum and maximum for scaling.

    Args:
        x: array-like of shape (n_samples, n_features)
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.

    Returns:
        self. Returns the transformer with fitted parameters.
    """
    # the scaler is fit to the squared covariates (x ** 2), not the raw values
    self.estimator = MinMaxScaler(clip=self.clamp, feature_range=self.feature_range)
    self.estimator.fit(np.array(x) ** 2)

    return self

fit_transform(x)

Fits scaler to x and returns transformed features.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data to fit the scaler and to transform.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
136
137
138
139
140
141
142
143
144
145
146
147
def fit_transform(self, x: ArrayLike) -> np.ndarray:
    """Fits scaler to x and returns transformed features.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data to fit the scaler and to transform.

    Returns:
        ndarray with transformed data.
    """
    self.fit(x)
    # NOTE: inlines the same expression as transform(); equivalent to
    # calling self.transform(x) after the fit
    return self.estimator.transform(np.array(x) ** 2)

inverse_transform(x)

Revert from transformed features to original covariate values.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Transformed feature data to convert to covariate data.

required

Returns:

Type Description
np.ndarray

ndarray with unscaled covariate values.

Source code in elapid/features.py
149
150
151
152
153
154
155
156
157
158
159
def inverse_transform(self, x: ArrayLike) -> np.ndarray:
    """Revert from transformed features to original covariate values.

    Args:
        x: array-like of shape (n_samples, n_features)
            Transformed feature data to convert to covariate data.

    Returns:
        ndarray with unscaled covariate values.
    """
    # the square root yields magnitudes only, so originally-negative
    # covariates are returned as their absolute values
    return self.estimator.inverse_transform(np.array(x)) ** 0.5

transform(x)

Scale covariates according to the feature range.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data that will be transformed.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
124
125
126
127
128
129
130
131
132
133
134
def transform(self, x: ArrayLike) -> np.ndarray:
    """Scale covariates according to the feature range.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data that will be transformed.

    Returns:
        ndarray with transformed data.
    """
    # apply the fitted min-max scaling to the squared covariates
    return self.estimator.transform(np.array(x) ** 2)

ThresholdTransformer

Bases: BaseEstimator, TransformerMixin

Apply binary thresholds across evenly-spaced bins for each covariate.

Source code in elapid/features.py
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
class ThresholdTransformer(BaseEstimator, TransformerMixin):
    """Apply binary thresholds across evenly-spaced bins for each covariate."""

    def __init__(self, n_thresholds: int = MaxentConfig.n_threshold_features):
        # number of evenly-spaced threshold bins computed per covariate
        self.n_thresholds = n_thresholds
        # per-feature minima/maxima and the threshold grid, populated by fit()
        self.mins_ = None
        self.maxs_ = None
        self.threshold_indices_ = None

    def fit(self, x: ArrayLike) -> "ThresholdTransformer":
        """Compute the minimum and maximum for scaling.

        Args:
            x: array-like of shape (n_samples, n_features)
                The data used to compute the per-feature minimum and maximum
                used for later scaling along the features axis.

        Returns:
            self. Returns the transformer with fitted parameters.
        """
        x = np.array(x)
        self.mins_ = x.min(axis=0)
        self.maxs_ = x.max(axis=0)
        # shape (n_thresholds, n_features): one column of evenly-spaced
        # threshold values spanning [min, max] for each covariate
        self.threshold_indices_ = np.linspace(self.mins_, self.maxs_, self.n_thresholds)

        return self

    def transform(self, x: ArrayLike) -> np.ndarray:
        """Scale covariates according to the feature range.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data that will be transformed.

        Returns:
            ndarray with transformed data.
        """
        x = np.array(x)
        # tile the covariates once per threshold and the thresholds once per
        # sample so the comparison below is elementwise
        # (exact layout depends on the repeat_array helper — assumed from use here)
        xarr = repeat_array(x, len(self.threshold_indices_), axis=-1)
        tarr = repeat_array(self.threshold_indices_.transpose(), len(x), axis=0)
        # binary "covariate exceeds threshold" indicators, flattened to
        # shape (n_samples, n_features * n_thresholds)
        thresh = (xarr > tarr).reshape(x.shape[0], -1)
        return thresh.astype(np.uint8)

    def fit_transform(self, x: ArrayLike) -> np.ndarray:
        """Fits scaler to x and returns transformed features.

        Args:
            x: array-like of shape (n_samples, n_features)
                Input data to fit the scaler and to transform.

        Returns:
            ndarray with transformed data.
        """
        self.fit(x)
        return self.transform(x)

fit(x)

Compute the minimum and maximum for scaling.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis.

required

Returns:

Type Description
ThresholdTransformer

self. Returns the transformer with fitted parameters.

Source code in elapid/features.py
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
def fit(self, x: ArrayLike) -> "ThresholdTransformer":
    """Compute the minimum and maximum for scaling.

    Args:
        x: array-like of shape (n_samples, n_features)
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.

    Returns:
        self. Returns the transformer with fitted parameters.
    """
    x = np.array(x)
    self.mins_ = x.min(axis=0)
    self.maxs_ = x.max(axis=0)
    # shape (n_thresholds, n_features): evenly-spaced threshold values
    # spanning [min, max] for each covariate
    self.threshold_indices_ = np.linspace(self.mins_, self.maxs_, self.n_thresholds)

    return self

fit_transform(x)

Fits scaler to x and returns transformed features.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data to fit the scaler and to transform.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
259
260
261
262
263
264
265
266
267
268
269
270
def fit_transform(self, x: ArrayLike) -> np.ndarray:
    """Fits scaler to x and returns transformed features.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data to fit the scaler and to transform.

    Returns:
        ndarray with transformed data.
    """
    # fit the thresholds to x, then binarize the same covariates
    self.fit(x)
    return self.transform(x)

transform(x)

Scale covariates according to the feature range.

Parameters:

Name Type Description Default
x ArrayLike

array-like of shape (n_samples, n_features) Input data that will be transformed.

required

Returns:

Type Description
np.ndarray

ndarray with transformed data.

Source code in elapid/features.py
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
def transform(self, x: ArrayLike) -> np.ndarray:
    """Scale covariates according to the feature range.

    Args:
        x: array-like of shape (n_samples, n_features)
            Input data that will be transformed.

    Returns:
        ndarray with transformed data.
    """
    x = np.array(x)
    # tile the covariates once per threshold and the thresholds once per
    # sample so the comparison below is elementwise
    # (exact layout depends on the repeat_array helper — assumed from use here)
    xarr = repeat_array(x, len(self.threshold_indices_), axis=-1)
    tarr = repeat_array(self.threshold_indices_.transpose(), len(x), axis=0)
    # binary "covariate exceeds threshold" indicators, flattened to
    # shape (n_samples, n_features * n_thresholds)
    thresh = (xarr > tarr).reshape(x.shape[0], -1)
    return thresh.astype(np.uint8)

column_product(array)

Computes the column-wise product of a 2D array.

Parameters:

Name Type Description Default
array np.ndarray

array-like of shape (n_samples, n_features)

required

Returns:

Type Description
np.ndarray

ndarray of shape (n_samples, n_features * (n_features - 1) / 2), with one column per unique pair of input columns

Source code in elapid/features.py
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
def column_product(array: np.ndarray) -> np.ndarray:
    """Computes the column-wise product of a 2D array.

    Args:
        array: array-like of shape (n_samples, n_features)

    Returns:
        ndarray of shape (n_samples, n_features * (n_features - 1) / 2) with
            one column per unique pair of input columns. A single-column
            input is returned unchanged.
    """
    nrows, ncols = array.shape

    # nothing to pair with: return the input as-is
    if ncols == 1:
        return array

    # pair column i with every column to its right, so each unique pair
    # (i, j) with i < j contributes exactly one product column
    products = [array[:, i].reshape(nrows, 1) * array[:, i + 1 :] for i in range(ncols - 1)]
    return np.concatenate(products, axis=1)

compute_lambdas(y, weights, reg, n_lambdas=MaxentConfig.n_lambdas)

Computes lambda parameter values for elastic lasso fits.

Parameters:

Name Type Description Default
y ArrayLike

array-like of shape (n_samples,) with binary presence/background (1/0) values

required
weights ArrayLike

per-sample model weights

required
reg ArrayLike

per-feature regularization coefficients

required
n_lambdas int

number of lambda values to estimate

MaxentConfig.n_lambdas

Returns:

Name Type Description
lambdas np.ndarray

Array of lambda scores of length n_lambdas

Source code in elapid/features.py
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
def compute_lambdas(
    y: ArrayLike, weights: ArrayLike, reg: ArrayLike, n_lambdas: int = MaxentConfig.n_lambdas
) -> np.ndarray:
    """Computes lambda parameter values for elastic lasso fits.

    Args:
        y: array-like of shape (n_samples,) with binary presence/background (1/0) values
        weights: per-sample model weights
        reg: per-feature regularization coefficients
        n_lambdas: number of lambda values to estimate

    Returns:
        lambdas: Array of lambda scores of length n_lambdas
    """
    # lambdas decay from 10**4 down to 10**0, scaled by the mean
    # regularization and the ratio of presence count to total sample weight
    exponents = np.linspace(4, 0, n_lambdas)
    presence_ratio = np.sum(y) / np.sum(weights)
    return (10 ** exponents) * np.mean(reg) * presence_ratio

compute_regularization(y, z, feature_labels, beta_multiplier=MaxentConfig.beta_multiplier, beta_lqp=MaxentConfig.beta_lqp, beta_threshold=MaxentConfig.beta_threshold, beta_hinge=MaxentConfig.beta_hinge, beta_categorical=MaxentConfig.beta_hinge)

Computes variable regularization values for all feature data.

Parameters:

Name Type Description Default
y ArrayLike

array-like of shape (n_samples,) with binary presence/background (1/0) values

required
z np.ndarray

model features (transformations applied to covariates)

required
feature_labels List[str]

list of length n_features, with labels identifying each column's feature type with options ["linear", "quadratic", "product", "threshold", "hinge", "categorical"]

required
beta_multiplier float

scaler for all regularization parameters. higher values exclude more features

MaxentConfig.beta_multiplier
beta_lqp float

scaler for linear, quadratic and product feature regularization

MaxentConfig.beta_lqp
beta_threshold float

scaler for threshold feature regularization

MaxentConfig.beta_threshold
beta_hinge float

scaler for hinge feature regularization

MaxentConfig.beta_hinge
beta_categorical float

scaler for categorical feature regularization

MaxentConfig.beta_hinge

Returns:

Name Type Description
max_reg np.ndarray

Array with per-feature regularization parameters

Source code in elapid/features.py
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
def compute_regularization(
    y: ArrayLike,
    z: np.ndarray,
    feature_labels: List[str],
    beta_multiplier: float = MaxentConfig.beta_multiplier,
    beta_lqp: float = MaxentConfig.beta_lqp,
    beta_threshold: float = MaxentConfig.beta_threshold,
    beta_hinge: float = MaxentConfig.beta_hinge,
    beta_categorical: float = MaxentConfig.beta_categorical,
) -> np.ndarray:
    """Computes variable regularization values for all feature data.

    Args:
        y: array-like of shape (n_samples,) with binary presence/background (1/0) values
        z: model features (transformations applied to covariates)
        feature_labels: list of length n_features, with labels identifying each column's feature type
            with options ["linear", "quadratic", "product", "threshold", "hinge", "categorical"]
        beta_multiplier: scaler for all regularization parameters. higher values exclude more features
        beta_lqp: scaler for linear, quadratic and product feature regularization
        beta_threshold: scaler for threshold feature regularization
        beta_hinge: scaler for hinge feature regularization
        beta_categorical: scaler for categorical feature regularization
            (default fixed from beta_hinge, which looked like a copy-paste slip)

    Returns:
        max_reg: Array with per-feature regularization parameters
    """
    # compute regularization based on presence-only locations
    z1 = z[y == 1]
    nrows, ncols = z1.shape
    labels = np.array(feature_labels)
    nlabels = len(feature_labels)

    assert nlabels == ncols, f"number of feature_labels ({nlabels}) must match number of features ({ncols})"

    def _interp_reg(table, multiplier):
        """Interpolate a sample-count-dependent regularization score from a config table."""
        fr_max, fr_min = table
        ap = np.interp(nrows, fr_max, fr_min)
        return multiplier * ap / np.sqrt(nrows)

    # create arrays to store the regularization params
    base_regularization = np.zeros(ncols)
    hinge_regularization = np.zeros(ncols)
    threshold_regularization = np.zeros(ncols)

    # use a different reg table based on the features set
    if "product" in labels:
        table_lqp = RegularizationConfig.product
    elif "quadratic" in labels:
        table_lqp = RegularizationConfig.quadratic
    else:
        table_lqp = RegularizationConfig.linear

    # linear, quadratic and product features all share the same lqp
    # table and multiplier, so handle them in one loop
    for label in ("linear", "quadratic", "product"):
        if label in labels:
            base_regularization[labels == label] = _interp_reg(table_lqp, beta_lqp)

    if "threshold" in labels:
        threshold_idxs = labels == "threshold"
        base_regularization[threshold_idxs] = _interp_reg(RegularizationConfig.threshold, beta_threshold)

        # increase regularization for uniform threshold values
        # NOTE(review): these masks span *all* columns, not just threshold
        # features, so any all-0/all-1 column is bumped — preserved as-is
        all_zeros = np.all(z1 == 0, axis=0)
        all_ones = np.all(z1 == 1, axis=0)
        threshold_regularization[all_zeros] = 1
        threshold_regularization[all_ones] = 1

    if "hinge" in labels:
        hinge_idxs = labels == "hinge"
        base_regularization[hinge_idxs] = _interp_reg(RegularizationConfig.hinge, beta_hinge)

        # increase regularization for extreme hinge values
        hinge_std = np.std(z1[:, hinge_idxs], ddof=1, axis=0)
        hinge_sqrt = np.zeros(len(hinge_std)) + (1 / np.sqrt(nrows))
        std = np.max((hinge_std, hinge_sqrt), axis=0)
        hinge_regularization[hinge_idxs] = (0.5 * std) / np.sqrt(nrows)

    if "categorical" in labels:
        categorical_idxs = labels == "categorical"
        base_regularization[categorical_idxs] = _interp_reg(RegularizationConfig.categorical, beta_categorical)

    # compute the maximum regularization based on a few different approaches
    default_regularization = 0.001 * (np.max(z, axis=0) - np.min(z, axis=0))
    variance_regularization = np.std(z1, ddof=1, axis=0) * base_regularization
    max_regularization = np.max(
        (default_regularization, variance_regularization, hinge_regularization, threshold_regularization), axis=0
    )

    # apply the final scaling factor
    max_regularization *= beta_multiplier

    return max_regularization

compute_weights(y, pbr=100)

Compute Maxent-format per-sample model weights.

Parameters:

Name Type Description Default
y ArrayLike

array-like of shape (n_samples,) with binary presence/background (1/0) values

required
pbr int

presence-to-background weight ratio. pbr=100 assigns each background sample a weight 100 times that of a presence sample.

100

Returns:

Name Type Description
weights np.ndarray

array with glmnet-formatted sample weights

Source code in elapid/features.py
607
608
609
610
611
612
613
614
615
616
617
618
def compute_weights(y: ArrayLike, pbr: int = 100) -> np.ndarray:
    """Compute Maxent-format per-sample model weights.

    Args:
        y: array-like of shape (n_samples,) with binary presence/background (1/0) values
        pbr: presence-to-background weight ratio. pbr=100 sets background samples to 1/100 weight of presence samples.

    Returns:
        weights: array with glmnet-formatted sample weights
    """
    weights = np.array(y + (1 - y) * pbr)
    return weights

left_hinge(x, mn, mx)

Computes hinge transformation values.

Parameters:

Name Type Description Default
x ArrayLike

Array-like of covariate values

required
mn float

Minimum covariate value to fit hinges to

required
mx float

Maximum covariate value to fit hinges to

required

Returns:

Type Description
np.ndarray

Array of hinge features

Source code in elapid/features.py
578
579
580
581
582
583
584
585
586
587
588
589
def left_hinge(x: ArrayLike, mn: float, mx: float) -> np.ndarray:
    """Computes hinge transformation values.

    Args:
        x: Array-like of covariate values
        mn: Minimum covariate value to fit hinges to
        mx: Maximum covariate value to fit hinges to

    Returns:
        Array of hinge features
    """
    # broadcast mx across the hinge columns, then clamp the ramp to [0, 1]
    # (mn/mx behave as arrays here despite the float annotation — see mn.shape)
    mx_broadcast = repeat_array(mx, mn.shape[-1], axis=1)
    ramp = (x - mn) / (mx_broadcast - mn)
    return np.minimum(1, np.maximum(0, ramp))

right_hinge(x, mn, mx)

Computes hinge transformation values.

Parameters:

Name Type Description Default
x ArrayLike

Array-like of covariate values

required
mn float

Minimum covariate value to fit hinges to

required
mx float

Maximum covariate value to fit hinges to

required

Returns:

Type Description
np.ndarray

Array of hinge features

Source code in elapid/features.py
592
593
594
595
596
597
598
599
600
601
602
603
604
def right_hinge(x: ArrayLike, mn: float, mx: float) -> np.ndarray:
    """Computes hinge transformation values.

    Args:
        x: Array-like of covariate values
        mn: Minimum covariate value to fit hinges to
        mx: Maximum covariate value to fit hinges to

    Returns:
        Array of hinge features
    """
    # broadcast mn across the hinge columns, then clamp the ramp to [0, 1]
    # (mn/mx behave as arrays here despite the float annotation — see mx.shape)
    lower = repeat_array(mn, mx.shape[-1], axis=1)
    ramp = (x - lower) / (mx - lower)
    return np.minimum(1, np.maximum(0, ramp))