tf

config

class flatiron.tf.config.TFAxis(**data)[source]

Bases: BaseModel

axis: int

class flatiron.tf.config.TFBaseConfig(**data)[source]

Bases: BaseModel

name: str

class flatiron.tf.config.TFBeta(**data)[source]

Bases: BaseModel

beta_1: float
beta_2: float

class flatiron.tf.config.TFClsId(**data)[source]

Bases: BaseModel

class_id: Optional[int]

class flatiron.tf.config.TFEpsilon(**data)[source]

Bases: BaseModel

epsilon: Annotated[float]

class flatiron.tf.config.TFFramework(**data)[source]

Bases: BaseModel

Configuration for calls to model.compile.

See: https://www.tensorflow.org/api_docs/python/tf/keras/Model#compile

Attributes:
  • name (str): Framework name. Default: ‘tensorflow’.
  • device (str, optional): Hardware device. Default: ‘gpu’.
  • loss_weights (list[float], optional): List of loss weights. Default: None.
  • weighted_metrics (list[float], optional): List of metric weights. Default: None.
  • run_eagerly (bool, optional): Leave as False. Default: False.
  • steps_per_execution (int, optional): Number of batches per function call. Default: 1.
  • jit_compile (bool, optional): Use XLA. Default: False.
  • auto_scale_loss (bool, optional): When True, model dtype is mixed_float16. Default: True.

auto_scale_loss: bool
device: Annotated[str]
jit_compile: bool
loss_weights: Optional[list[float]]
name: str
run_eagerly: bool
steps_per_execution: Annotated[int]
weighted_metrics: Optional[list[float]]

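As an illustration, a framework config for model.compile might look like the following dict; the keys mirror the attributes above and the values shown are the documented defaults:

    # Hypothetical framework config; all keys are documented above.
    framework = dict(
        name='tensorflow',
        device='gpu',
        loss_weights=None,
        weighted_metrics=None,
        run_eagerly=False,
        steps_per_execution=1,
        jit_compile=False,
        auto_scale_loss=True,
    )
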
class flatiron.tf.config.TFIgnoreClass(**data)[source]

Bases: BaseModel

ignore_class: Optional[int]

class flatiron.tf.config.TFLogits(**data)[source]

Bases: BaseModel

from_logits: bool

class flatiron.tf.config.TFLossBaseConfig(**data)[source]

Bases: TFBaseConfig

dtype: Optional[Annotated[str]]
reduction: Annotated[str]

class flatiron.tf.config.TFLossBinaryCrossentropy(**data)[source]

Bases: TFLossBaseConfig, TFAxis, TFLogits

label_smoothing: float

class flatiron.tf.config.TFLossBinaryFocalCrossentropy(**data)[source]

Bases: TFLossBaseConfig, TFAxis, TFLogits

alpha: float
apply_class_balancing: bool
gamma: float
label_smoothing: float

class flatiron.tf.config.TFLossCategoricalCrossentropy(**data)[source]

Bases: TFLossBaseConfig, TFAxis, TFLogits

label_smoothing: float

class flatiron.tf.config.TFLossCategoricalFocalCrossentropy(**data)[source]

Bases: TFLossBaseConfig, TFAxis, TFLogits

alpha: float
gamma: float
label_smoothing: float

class flatiron.tf.config.TFLossCircle(**data)[source]

Bases: TFLossBaseConfig

gamma: float
margin: float
remove_diagonal: bool

class flatiron.tf.config.TFLossCosineSimilarity(**data)[source]

Bases: TFLossBaseConfig, TFAxis

class flatiron.tf.config.TFLossDice(**data)[source]

Bases: TFLossBaseConfig, TFAxis

class flatiron.tf.config.TFLossHuber(**data)[source]

Bases: TFLossBaseConfig

delta: float

class flatiron.tf.config.TFLossSparseCategoricalCrossentropy(**data)[source]

Bases: TFLossBaseConfig, TFLogits, TFIgnoreClass

class flatiron.tf.config.TFLossTversky(**data)[source]

Bases: TFLossBaseConfig, TFAxis

alpha: float
beta: float

class flatiron.tf.config.TFMetricAUC(**data)[source]

Bases: TFMetricBaseConfig, TFLogits, TFNumThresh, TFThresh

curve: Annotated[str]
label_weights: Optional[list[float]]
multi_label: bool
num_labels: Optional[int]
summation_method: Annotated[str]

class flatiron.tf.config.TFMetricAccuracy(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricBaseConfig(**data)[source]

Bases: TFBaseConfig

dtype: Optional[Annotated[str]]

class flatiron.tf.config.TFMetricBinaryAccuracy(**data)[source]

Bases: TFMetricBaseConfig

threshold: float

class flatiron.tf.config.TFMetricBinaryCrossentropy(**data)[source]

Bases: TFMetricBaseConfig, TFLogits

label_smoothing: int

class flatiron.tf.config.TFMetricBinaryIoU(**data)[source]

Bases: TFMetricBaseConfig

target_class_ids: list[int]
threshold: float

class flatiron.tf.config.TFMetricCategoricalAccuracy(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricCategoricalCrossentropy(**data)[source]

Bases: TFMetricBaseConfig, TFAxis, TFLogits

label_smoothing: int

class flatiron.tf.config.TFMetricCategoricalHinge(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricConcordanceCorrelation(**data)[source]

Bases: TFMetricBaseConfig, TFAxis

class flatiron.tf.config.TFMetricCosineSimilarity(**data)[source]

Bases: TFMetricBaseConfig, TFAxis

class flatiron.tf.config.TFMetricF1Score(**data)[source]

Bases: TFMetricBaseConfig

average: Optional[str]
threshold: Optional[float]

class flatiron.tf.config.TFMetricFBetaScore(**data)[source]

Bases: TFMetricBaseConfig

average: Optional[str]
beta: float
threshold: Optional[float]

class flatiron.tf.config.TFMetricFalseNegatives(**data)[source]

Bases: TFMetricBaseConfig, TFThresh

class flatiron.tf.config.TFMetricFalsePositives(**data)[source]

Bases: TFMetricBaseConfig, TFThresh

class flatiron.tf.config.TFMetricHinge(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricIoU(**data)[source]

Bases: TFMetricBaseConfig, TFAxis, TFIgnoreClass, TFNumClasses

sparse_y_pred: bool
sparse_y_true: bool
target_class_ids: list[int]

class flatiron.tf.config.TFMetricKLDivergence(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricLogCoshError(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricMean(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricMeanAbsoluteError(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricMeanAbsolutePercentageError(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricMeanIoU(**data)[source]

Bases: TFMetricBaseConfig, TFAxis, TFIgnoreClass, TFNumClasses

sparse_y_pred: bool
sparse_y_true: bool

class flatiron.tf.config.TFMetricMeanSquaredError(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricMeanSquaredLogarithmicError(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricMetric(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricOneHotIoU(**data)[source]

Bases: TFMetricBaseConfig, TFAxis, TFIgnoreClass, TFNumClasses

sparse_y_pred: bool
target_class_ids: list[int]

class flatiron.tf.config.TFMetricOneHotMeanIoU(**data)[source]

Bases: TFMetricBaseConfig, TFAxis, TFIgnoreClass, TFNumClasses

sparse_y_pred: bool

class flatiron.tf.config.TFMetricPearsonCorrelation(**data)[source]

Bases: TFMetricBaseConfig, TFAxis

class flatiron.tf.config.TFMetricPoisson(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricPrecision(**data)[source]

Bases: TFMetricBaseConfig, TFClsId, TFThresh

top_k: Optional[int]

class flatiron.tf.config.TFMetricPrecisionAtRecall(**data)[source]

Bases: TFMetricBaseConfig, TFClsId, TFNumThresh

recall: float

class flatiron.tf.config.TFMetricR2Score(**data)[source]

Bases: TFMetricBaseConfig

class_aggregation: Optional[Annotated[str]]
num_regressors: Annotated[int]

class flatiron.tf.config.TFMetricRecall(**data)[source]

Bases: TFMetricBaseConfig, TFClsId, TFThresh

top_k: Optional[int]

class flatiron.tf.config.TFMetricRecallAtPrecision(**data)[source]

Bases: TFMetricBaseConfig, TFClsId, TFNumThresh

precision: float

class flatiron.tf.config.TFMetricRootMeanSquaredError(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricSensitivityAtSpecificity(**data)[source]

Bases: TFMetricBaseConfig, TFClsId, TFNumThresh

specificity: float

class flatiron.tf.config.TFMetricSparseCategoricalAccuracy(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricSparseCategoricalCrossentropy(**data)[source]

Bases: TFMetricBaseConfig, TFAxis, TFLogits

class flatiron.tf.config.TFMetricSparseTopKCategoricalAccuracy(**data)[source]

Bases: TFMetricBaseConfig

from_sorted_ids: bool
k: int

class flatiron.tf.config.TFMetricSpecificityAtSensitivity(**data)[source]

Bases: TFMetricBaseConfig, TFClsId, TFNumThresh

sensitivity: float

class flatiron.tf.config.TFMetricSquaredHinge(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricSum(**data)[source]

Bases: TFMetricBaseConfig

class flatiron.tf.config.TFMetricTopKCategoricalAccuracy(**data)[source]

Bases: TFMetricBaseConfig

k: int

class flatiron.tf.config.TFMetricTrueNegatives(**data)[source]

Bases: TFMetricBaseConfig, TFThresh

class flatiron.tf.config.TFMetricTruePositives(**data)[source]

Bases: TFMetricBaseConfig, TFThresh

class flatiron.tf.config.TFNumClasses(**data)[source]

Bases: BaseModel

num_classes: Annotated[int]

class flatiron.tf.config.TFNumThresh(**data)[source]

Bases: BaseModel

num_thresholds: Annotated[int]

class flatiron.tf.config.TFOptAdadelta(**data)[source]

Bases: TFOptBaseConfig, TFEpsilon

rho: float

class flatiron.tf.config.TFOptAdafactor(**data)[source]

Bases: TFOptBaseConfig

beta_2_decay: float
clip_threshold: float
epsilon_1: Annotated[float]
epsilon_2: Annotated[float]
relative_step: bool

class flatiron.tf.config.TFOptAdagrad(**data)[source]

Bases: TFOptBaseConfig, TFEpsilon

initial_accumulator_value: float

class flatiron.tf.config.TFOptAdam(**data)[source]

Bases: TFOptBaseConfig, TFBeta, TFEpsilon

amsgrad: bool

class flatiron.tf.config.TFOptAdamW(**data)[source]

Bases: TFOptBaseConfig, TFBeta, TFEpsilon

amsgrad: bool
weight_decay: float

class flatiron.tf.config.TFOptAdamax(**data)[source]

Bases: TFOptBaseConfig, TFBeta, TFEpsilon

class flatiron.tf.config.TFOptBaseConfig(**data)[source]

Bases: TFBaseConfig

clipnorm: Optional[float]
clipvalue: Optional[float]
ema_momentum: Annotated[float]
ema_overwrite_frequency: Optional[Annotated[int]]
global_clipnorm: Optional[float]
gradient_accumulation_steps: Optional[Annotated[int]]
learning_rate: Optional[Annotated[float]]
loss_scale_factor: Optional[float]
use_ema: bool

class flatiron.tf.config.TFOptFtrl(**data)[source]

Bases: TFOptBaseConfig

beta: float
initial_accumulator_value: float
l1_regularization_strength: float
l2_regularization_strength: float
l2_shrinkage_regularization_strength: float
learning_rate_power: Annotated[float]

class flatiron.tf.config.TFOptLamb(**data)[source]

Bases: TFOptBaseConfig, TFBeta, TFEpsilon

class flatiron.tf.config.TFOptLion(**data)[source]

Bases: TFOptBaseConfig, TFBeta

class flatiron.tf.config.TFOptNadam(**data)[source]

Bases: TFOptBaseConfig, TFBeta, TFEpsilon

class flatiron.tf.config.TFOptRMSprop(**data)[source]

Bases: TFOptBaseConfig, TFEpsilon

centered: bool
momentum: float
rho: float

class flatiron.tf.config.TFOptSGD(**data)[source]

Bases: TFOptBaseConfig

momentum: Annotated[float]
nesterov: bool

class flatiron.tf.config.TFThresh(**data)[source]

Bases: BaseModel

thresholds: Union[float, list[float], None]
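
These config classes are plain pydantic models, so a config can be validated before it reaches TensorFlow. A minimal sketch, assuming the field values shown (they are illustrative, not package defaults):

    from flatiron.tf.config import TFOptSGD

    # Validate an optimizer config before handing it to flatiron.tf.optimizer.get.
    # Field names come from TFOptSGD and the TFOptBaseConfig / TFBaseConfig bases;
    # the values are illustrative.
    config = TFOptSGD(
        name='SGD',
        learning_rate=0.01,
        momentum=0.9,
        nesterov=True,
    )
    print(config.model_dump())  # plain dict of validated settings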

loss

flatiron.tf.loss.dice_loss(y_true, y_pred, smooth=1)[source]

Dice loss function with smoothing factor to prevent exploding or vanishing gradients.

See: https://cnvrg.io/semantic-segmentation

Equation:

\[ L_{dice}(y, \hat{y}, S) = 1 - \frac{2I + S}{U + S} \]

Terms:

\begin{alignat*}{3}
\text{intersection} & \rightarrow I(y, \hat{y}) && = \sum |y_i \hat{y}_i| \\
\text{union} & \rightarrow U(y, \hat{y}) && = \sum (|y_i| + |\hat{y}_i|) \\
\text{smoothing factor} & \rightarrow S && \\
\text{expansion} & \rightarrow L_{dice}(y, \hat{y}, S) && = 1 - \frac{2 \sum |y_i \hat{y}_i| + S}{\sum (|y_i| + |\hat{y}_i|) + S}
\end{alignat*}
Parameters:
  • y_true (NDArray or Tensor) – Ground truth labels.

  • y_pred (NDArray or Tensor) – Predicted labels.

  • smooth (int, optional) – Smoothing factor. Default: 1.

Returns:

Loss value.

Return type:

tf.Tensor
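
A minimal sketch of the equation above in TensorFlow ops; it illustrates the math and is not necessarily the package's exact implementation:

    import tensorflow as tf

    def dice_loss_sketch(y_true, y_pred, smooth=1):
        # I = sum(|y * y_hat|), U = sum(|y| + |y_hat|)
        y_true = tf.cast(y_true, y_pred.dtype)
        intersection = tf.reduce_sum(tf.abs(y_true * y_pred))
        union = tf.reduce_sum(tf.abs(y_true) + tf.abs(y_pred))
        return 1 - (2 * intersection + smooth) / (union + smooth)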

flatiron.tf.loss.get(config)[source]

Get function from this module.

Parameters:

config (dict) – Loss config.

Returns:

Module function.

Return type:

function

flatiron.tf.loss.jaccard_loss(y_true, y_pred, smooth=100)[source]

Jaccard loss is useful for unbalanced datasets. It has been shifted so that it converges on 0 and smoothed to avoid exploding or vanishing gradients.

See: https://en.wikipedia.org/wiki/Jaccard_index

Equation:

\[ L_{jacc}(y, \hat{y}, S) = \left( 1 - \frac{I + S}{U - I + S} \right) S \]

Terms:

\begin{alignat*}{3}
\text{intersection} & \rightarrow I(y, \hat{y}) && = \sum |y_i \hat{y}_i| \\
\text{union} & \rightarrow U(y, \hat{y}) && = \sum (|y_i| + |\hat{y}_i|) \\
\text{smoothing factor} & \rightarrow S && \\
\text{expansion} & \rightarrow L_{jacc}(y, \hat{y}, S) && = \left( 1 - \frac{\sum |y_i \hat{y}_i| + S}{\sum (|y_i| + |\hat{y}_i|) - \sum |y_i \hat{y}_i| + S} \right) S
\end{alignat*}
Parameters:
  • y_true (NDArray or Tensor) – Ground truth labels.

  • y_pred (NDArray or Tensor) – Predicted labels.

  • smooth (int, optional) – Smoothing factor. Default: 100.

Returns:

Loss value.

Return type:

tf.Tensor
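
A sketch of the shifted, smoothed Jaccard loss above, under the same caveat as the dice sketch:

    import tensorflow as tf

    def jaccard_loss_sketch(y_true, y_pred, smooth=100):
        # I = sum(|y * y_hat|), U = sum(|y| + |y_hat|)
        y_true = tf.cast(y_true, y_pred.dtype)
        intersection = tf.reduce_sum(tf.abs(y_true * y_pred))
        union = tf.reduce_sum(tf.abs(y_true) + tf.abs(y_pred))
        jaccard = (intersection + smooth) / (union - intersection + smooth)
        return (1 - jaccard) * smooth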

metric

flatiron.tf.metric.dice(y_true, y_pred, smooth=1.0)[source]

Dice metric.

See: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient

Equation:

\[ Dice(y, \hat{y}, S) = \frac{2I + S}{U + S} \]

Terms:

\begin{alignat*}{3}
\text{intersection} & \rightarrow I(y, \hat{y}) && = \sum (y_i \hat{y}_i) \\
\text{union} & \rightarrow U(y, \hat{y}) && = \sum (y_i + \hat{y}_i) \\
\text{smoothing factor} & \rightarrow S && \\
\text{expansion} & \rightarrow Dice(y, \hat{y}, S) && = \frac{2 \sum (y_i \hat{y}_i) + S}{\sum (y_i + \hat{y}_i) + S}
\end{alignat*}
Parameters:
  • y_true (NDArray or Tensor) – True labels.

  • y_pred (NDArray or Tensor) – Predicted labels.

  • smooth (float, optional) – Smoothing factor. Default: 1.0

Returns:

Dice metric.

Return type:

tf.Tensor
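
The same equation as a metric sketch; unlike the loss, the ratio is not subtracted from 1. Illustrative only:

    import tensorflow as tf

    def dice_sketch(y_true, y_pred, smooth=1.0):
        # I = sum(y * y_hat), U = sum(y + y_hat)
        y_true = tf.cast(y_true, y_pred.dtype)
        intersection = tf.reduce_sum(y_true * y_pred)
        union = tf.reduce_sum(y_true + y_pred)
        return (2 * intersection + smooth) / (union + smooth)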

flatiron.tf.metric.get(config)[source]

Get function from this module.

Parameters:

config (dict) – Metric config.

Returns:

Module function.

Return type:

function

flatiron.tf.metric.intersection_over_union(y_true, y_pred, smooth=1.0)[source]

Intersection over union metric.

See: https://medium.com/analytics-vidhya/iou-intersection-over-union-705a39e7acef

Equation:

\[ IOU(y, \hat{y}, S) = \frac{I + S}{U + S} \]

Terms:

\begin{alignat*}{3}
\text{intersection} & \rightarrow I(y, \hat{y}) && = \sum (y_i \hat{y}_i) \\
\text{union} & \rightarrow U(y, \hat{y}) && = \sum (y_i + \hat{y}_i) - I(y, \hat{y}) \\
\text{smoothing factor} & \rightarrow S && \\
\text{expansion} & \rightarrow IOU(y, \hat{y}, S) && = \frac{\sum (y_i \hat{y}_i) + S}{\sum (y_i + \hat{y}_i) - \sum (y_i \hat{y}_i) + S}
\end{alignat*}
Parameters:
  • y_true (NDArray or Tensor) – True labels.

  • y_pred (NDArray or Tensor) – Predicted labels.

  • smooth (float, optional) – Smoothing factor. Default: 1.0

Returns:

IOU metric.

Return type:

tf.Tensor
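
A sketch of the IOU equation above (illustrative):

    import tensorflow as tf

    def iou_sketch(y_true, y_pred, smooth=1.0):
        # union subtracts the intersection, per the terms above
        y_true = tf.cast(y_true, y_pred.dtype)
        intersection = tf.reduce_sum(y_true * y_pred)
        union = tf.reduce_sum(y_true + y_pred) - intersection
        return (intersection + smooth) / (union + smooth)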

flatiron.tf.metric.jaccard(y_true, y_pred)[source]

Jaccard metric.

See: https://en.wikipedia.org/wiki/Jaccard_index

Equation:

\[ Jacc(y, \hat{y}) = \frac{1}{N} \sum_{i=0}^{N} \frac{I + 1}{U + 1} \]

Terms:

\begin{alignat*}{3}
\text{intersection} & \rightarrow I(y, \hat{y}) && = \sum (y_i \hat{y}_i) \\
\text{union} & \rightarrow U(y, \hat{y}) && = \sum (y_i + \hat{y}_i) - I(y, \hat{y}) \\
\text{expansion} & \rightarrow Jacc(y, \hat{y}) && = \frac{1}{N} \sum_{i=0}^{N} \frac{\sum (y_i \hat{y}_i) + 1}{\sum (y_i + \hat{y}_i) - \sum (y_i \hat{y}_i) + 1}
\end{alignat*}
Parameters:
  • y_true (NDArray or Tensor) – True labels.

  • y_pred (NDArray or Tensor) – Predicted labels.

Returns:

Jaccard metric.

Return type:

tf.Tensor
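
A sketch of the per-sample mean above, assuming the batch is the first axis:

    import tensorflow as tf

    def jaccard_sketch(y_true, y_pred):
        # Per-sample intersection and union, averaged over the batch.
        y_true = tf.cast(y_true, y_pred.dtype)
        n = tf.shape(y_pred)[0]
        yt = tf.reshape(y_true, [n, -1])
        yp = tf.reshape(y_pred, [n, -1])
        intersection = tf.reduce_sum(yt * yp, axis=1)
        union = tf.reduce_sum(yt + yp, axis=1) - intersection
        return tf.reduce_mean((intersection + 1) / (union + 1))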

optimizer

flatiron.tf.optimizer.get(config)[source]

Get function from this module.

Parameters:

config (dict) – Optimizer config.

Returns:

Module function.

Return type:

function
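
As an illustration, the config below reuses the TFOptSGD field names from the config section; presumably the name key selects the optimizer and the remaining keys become its kwargs:

    import flatiron.tf.optimizer as fi_optimizer

    # Illustrative config; key names come from TFOptSGD / TFOptBaseConfig.
    optimizer = fi_optimizer.get(dict(
        name='SGD',
        learning_rate=0.01,
        momentum=0.9,
        nesterov=True,
    ))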

tools

flatiron.tf.tools.compile(framework, model, optimizer, loss, metrics)[source]

Call model.compile on the given model with the given kwargs.

Parameters:
  • framework (dict) – Framework dict.

  • model (Any) – Model to be compiled.

  • optimizer (dict) – Optimizer settings.

  • loss (dict) – Loss to be compiled.

  • metrics (list[dict]) – Metrics function to be compiled.

Returns:

Dict of compiled objects.

Return type:

dict
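
A hedged usage sketch; the config keys are defined by the config classes above, and the loss and metric names here are hypothetical:

    import flatiron.tf.tools as fi_tools

    compiled = fi_tools.compile(
        framework=dict(name='tensorflow', device='gpu'),
        model=model,  # an uncompiled Keras model, defined elsewhere
        optimizer=dict(name='SGD', learning_rate=0.01),
        loss=dict(name='Dice'),  # hypothetical loss name
        metrics=[dict(name='MeanIoU', num_classes=2)],  # hypothetical metric config
    )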

flatiron.tf.tools.get(config, module, fallback_module)[source]

Given a config and a set of modules, return an instance or function.

Parameters:
  • config (dict) – Instance config.

  • module (str) – Always __name__.

  • fallback_module (str) – Fallback module, either a tf or torch module.

Raises:

EnforceError – If config is not a dict with a name key.

Returns:

Instance or function.

Return type:

object
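
A sketch of a call site, assuming the name key selects a class or function and the remaining keys are passed through; the fallback module name here is an assumption:

    import flatiron.tf.tools as fi_tools

    instance = fi_tools.get(
        dict(name='Dice', axis=-1),  # illustrative config
        __name__,
        'keras.losses',  # assumed fallback module
    )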

flatiron.tf.tools.get_callbacks(log_directory, checkpoint_pattern, checkpoint_params={})[source]

Create callbacks for a TensorFlow model.

Parameters:
  • log_directory (str or Path) – TensorBoard project log directory.

  • checkpoint_pattern (str) – Filepath pattern for checkpoint callback.

  • checkpoint_params (dict, optional) – Params to be passed to checkpoint callback. Default: {}.

Raises:
  • EnforceError – If log directory does not exist.

  • EnforceError – If checkpoint pattern does not contain ‘{epoch}’.

Returns:

Dict with TensorBoard and ModelCheckpoint callbacks.

Return type:

dict
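
Example usage; note the documented requirement that the checkpoint pattern contain ‘{epoch}’. The paths are illustrative:

    import flatiron.tf.tools as fi_tools

    callbacks = fi_tools.get_callbacks(
        log_directory='/tmp/tensorboard',  # must already exist
        checkpoint_pattern='/tmp/models/model_{epoch}.keras',
        checkpoint_params=dict(save_best_only=True),  # standard ModelCheckpoint kwarg
    )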

flatiron.tf.tools.pre_build(device)[source]

Set the hardware device.

Parameters:

device (str) – Hardware device.

Return type:

None
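
Presumably this maps the device string onto TensorFlow's device configuration; a minimal sketch of that idea using standard tf.config calls:

    import tensorflow as tf

    def pre_build_sketch(device):
        # Illustrative only: hide GPUs when a CPU run is requested.
        if device == 'cpu':
            tf.config.set_visible_devices([], 'GPU')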

flatiron.tf.tools.train(compiled, callbacks, train_data, test_data, params)[source]

Train TensorFlow model.

Parameters:
  • compiled (dict) – Compiled objects.

  • callbacks (dict) – Dict of callbacks.

  • train_data (Dataset) – Training dataset.

  • test_data (Dataset) – Test dataset.

  • params (dict) – Training params.

Return type:

None
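
Putting the tools together, a hedged end-to-end sketch; every config value is illustrative and the model and datasets are assumed to exist:

    import flatiron.tf.tools as fi_tools

    fi_tools.pre_build('gpu')
    compiled = fi_tools.compile(framework, model, optimizer, loss, metrics)
    callbacks = fi_tools.get_callbacks('/tmp/tensorboard', '/tmp/models/model_{epoch}.keras')
    fi_tools.train(
        compiled=compiled,
        callbacks=callbacks,
        train_data=train_data,  # training Dataset, defined elsewhere
        test_data=test_data,    # test Dataset, defined elsewhere
        params=dict(batch_size=32, epochs=10),  # assumed training params
    )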