torch

config

class flatiron.torch.config.TAct(**data)[source]

Bases: BaseModel

empty_target_action: Annotated[str]

class flatiron.torch.config.TBeta(**data)[source]

Bases: BaseModel

beta_1: float
beta_2: float

class flatiron.torch.config.TCap(**data)[source]

Bases: BaseModel

capturable: bool

class flatiron.torch.config.TCls(**data)[source]

Bases: BaseModel

num_classes: Optional[int]

class flatiron.torch.config.TDate(**data)[source]

Bases: BaseModel

data_range: Union[float, tuple[float, float], None]

class flatiron.torch.config.TDecay(**data)[source]

Bases: BaseModel

weight_decay: float

class flatiron.torch.config.TDiff(**data)[source]

Bases: BaseModel

differentiable: bool

class flatiron.torch.config.TEps(**data)[source]

Bases: BaseModel

epsilon: float

class flatiron.torch.config.TFor(**data)[source]

Bases: BaseModel

foreach: Optional[bool]

class flatiron.torch.config.TGroup1(**data)[source]

Bases: TCap, TDecay, TDiff, TEps, TFor, TMax

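TGroup1 exists only to aggregate the fields of its parent mixins, so a single instance validates capturable, weight_decay, differentiable, epsilon, foreach, and maximize together. A minimal sketch (whether any of these fields carry defaults is an assumption, so all are passed explicitly):

   from flatiron.torch.config import TGroup1

   # all mixin fields passed explicitly, since defaults are not documented here
   group = TGroup1(
       capturable=False, weight_decay=0.0, differentiable=False,
       epsilon=1e-8, foreach=None, maximize=False,
   )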

class flatiron.torch.config.TGroup2(**data)[source]

Bases: TRed, TReduct, TSize

class flatiron.torch.config.TGroup3(**data)[source]

Bases: TMarg, TRed, TReduct, TSize

class flatiron.torch.config.TInd(**data)[source]

Bases: BaseModel

ignore_index: Optional[int]

class flatiron.torch.config.TMReduct(**data)[source]

Bases: BaseModel

reduction: Annotated[str]

class flatiron.torch.config.TMarg(**data)[source]

Bases: BaseModel

margin: float

class flatiron.torch.config.TMax(**data)[source]

Bases: BaseModel

maximize: bool

class flatiron.torch.config.TNan(**data)[source]

Bases: BaseModel

nan_strategy: Union[float, Annotated[str]]

class flatiron.torch.config.TNanStrategy(**data)[source]

Bases: BaseModel

nan_strategy: Annotated[str]

class flatiron.torch.config.TOut(**data)[source]

Bases: BaseModel

num_outputs: int

class flatiron.torch.config.TRed(**data)[source]

Bases: BaseModel

reduce: Optional[bool]

class flatiron.torch.config.TReduct(**data)[source]

Bases: BaseModel

reduction: Annotated[str]

class flatiron.torch.config.TSize(**data)[source]

Bases: BaseModel

size_average: Optional[bool]

class flatiron.torch.config.TTopK(**data)[source]

Bases: BaseModel

top_k: Optional[int]

class flatiron.torch.config.TorchBaseConfig(**data)[source]

Bases: BaseModel

name: str

class flatiron.torch.config.TorchFramework(**data)[source]

Bases: BaseModel

Configuration for calls to torch train function.

name

Framework name. Default: ‘torch’.

Type:

str

device

Hardware device. Default: ‘cuda’.

Type:

str, optional

device: Annotated[str]
name: str
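
A minimal usage sketch (values are illustrative; any constraints hidden inside the Annotated types are not shown in this listing):

   from flatiron.torch.config import TorchFramework

   # both fields match their documented defaults
   framework = TorchFramework(name='torch', device='cuda')
   print(framework.device)  # cuda
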
class flatiron.torch.config.TorchLossBCELoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

class flatiron.torch.config.TorchLossBCEWithLogitsLoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

class flatiron.torch.config.TorchLossCTCLoss(**data)[source]

Bases: TorchBaseConfig, TReduct

blank: int
zero_infinity: bool

class flatiron.torch.config.TorchLossCosineEmbeddingLoss(**data)[source]

Bases: TorchBaseConfig, TGroup3

class flatiron.torch.config.TorchLossCrossEntropyLoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

ignore_index: int
label_smoothing: float

class flatiron.torch.config.TorchLossGaussianNLLLoss(**data)[source]

Bases: TorchBaseConfig, TEps, TReduct

full: bool

class flatiron.torch.config.TorchLossHingeEmbeddingLoss(**data)[source]

Bases: TorchBaseConfig, TGroup3

class flatiron.torch.config.TorchLossHuberLoss(**data)[source]

Bases: TorchBaseConfig, TReduct

delta: float

class flatiron.torch.config.TorchLossKLDivLoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

log_target: bool

class flatiron.torch.config.TorchLossL1Loss(**data)[source]

Bases: TorchBaseConfig, TGroup2

class flatiron.torch.config.TorchLossMSELoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

class flatiron.torch.config.TorchLossMarginRankingLoss(**data)[source]

Bases: TorchBaseConfig, TGroup3

class flatiron.torch.config.TorchLossMultiLabelMarginLoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

class flatiron.torch.config.TorchLossMultiLabelSoftMarginLoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

class flatiron.torch.config.TorchLossMultiMarginLoss(**data)[source]

Bases: TorchBaseConfig, TGroup3

exponent: int

class flatiron.torch.config.TorchLossNLLLoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

ignore_index: int

class flatiron.torch.config.TorchLossPairwiseDistance(**data)[source]

Bases: TorchBaseConfig, TEps

keepdim: bool
norm_degree: float

class flatiron.torch.config.TorchLossPoissonNLLLoss(**data)[source]

Bases: TorchBaseConfig, TEps, TGroup2

full: bool
log_input: bool

class flatiron.torch.config.TorchLossSmoothL1Loss(**data)[source]

Bases: TorchBaseConfig, TGroup2

beta: float

class flatiron.torch.config.TorchLossSoftMarginLoss(**data)[source]

Bases: TorchBaseConfig, TGroup2

class flatiron.torch.config.TorchLossTripletMarginLoss(**data)[source]

Bases: TorchBaseConfig, TEps, TGroup3

norm_degree: float
swap: bool

class flatiron.torch.config.TorchLossTripletMarginWithDistanceLoss(**data)[source]

Bases: TorchBaseConfig, TMarg, TReduct

swap: bool

class flatiron.torch.config.TorchMetricBLEUScore(**data)[source]

Bases: TorchBaseConfig

n_gram: int
smooth: bool
weights: Optional[list[float]]

class flatiron.torch.config.TorchMetricCHRFScore(**data)[source]

Bases: TorchBaseConfig

beta: float
lowercase: bool
n_char_order: int
n_word_order: int
return_sentence_level_score: bool
whitespace: bool

class flatiron.torch.config.TorchMetricCatMetric(**data)[source]

Bases: TorchBaseConfig, TNan

class flatiron.torch.config.TorchMetricConcordanceCorrCoef(**data)[source]

Bases: TorchBaseConfig, TOut

class flatiron.torch.config.TorchMetricCosineSimilarity(**data)[source]

Bases: TorchBaseConfig

reduction: Annotated[str]

class flatiron.torch.config.TorchMetricCramersV(**data)[source]

Bases: TorchBaseConfig, TCls, TNanStrategy

bias_correction: bool
nan_replace_value: Optional[float]

class flatiron.torch.config.TorchMetricCriticalSuccessIndex(**data)[source]

Bases: TorchBaseConfig

keep_sequence_dim: Optional[int]
threshold: float

class flatiron.torch.config.TorchMetricDice(**data)[source]

Bases: TorchBaseConfig, TCls, TInd, TTopK

average: Optional[Annotated[str]]
mdmc_average: Optional[Annotated[str]]
multiclass: Optional[bool]
threshold: float
zero_division: int

class flatiron.torch.config.TorchMetricErrorRelativeGlobalDimensionlessSynthesis(**data)[source]

Bases: TorchBaseConfig, TMReduct

ratio: float

class flatiron.torch.config.TorchMetricExplainedVariance(**data)[source]

Bases: TorchBaseConfig

multioutput: Annotated[str]

class flatiron.torch.config.TorchMetricExtendedEditDistance(**data)[source]

Bases: TorchBaseConfig

alpha: float
deletion: float
insertion: float
language: Annotated[str]
return_sentence_level_score: bool
rho: float

class flatiron.torch.config.TorchMetricFleissKappa(**data)[source]

Bases: TorchBaseConfig

mode: Annotated[str]

class flatiron.torch.config.TorchMetricKLDivergence(**data)[source]

Bases: TorchBaseConfig

log_prob: bool
reduction: Annotated[str]

class flatiron.torch.config.TorchMetricKendallRankCorrCoef(**data)[source]

Bases: TorchBaseConfig, TOut

alternative: Optional[Annotated[str]]
t_test: bool
variant: Annotated[str]

class flatiron.torch.config.TorchMetricLogCoshError(**data)[source]

Bases: TorchBaseConfig, TOut

class flatiron.torch.config.TorchMetricMaxMetric(**data)[source]

Bases: TorchBaseConfig, TNan

class flatiron.torch.config.TorchMetricMeanAbsoluteError(**data)[source]

Bases: TorchBaseConfig, TOut

class flatiron.torch.config.TorchMetricMeanMetric(**data)[source]

Bases: TorchBaseConfig, TNan

class flatiron.torch.config.TorchMetricMeanSquaredError(**data)[source]

Bases: TorchBaseConfig, TOut

squared: bool

class flatiron.torch.config.TorchMetricMinMetric(**data)[source]

Bases: TorchBaseConfig, TNan

class flatiron.torch.config.TorchMetricMinkowskiDistance(**data)[source]

Bases: TorchBaseConfig

p: float

class flatiron.torch.config.TorchMetricModifiedPanopticQuality(**data)[source]

Bases: TorchBaseConfig

allow_unknown_preds_category: bool
stuffs: list[int]
things: list[int]

class flatiron.torch.config.TorchMetricMultiScaleStructuralSimilarityIndexMeasure(**data)[source]

Bases: TorchBaseConfig, TMReduct, TDate

betas: tuple
gaussian_kernel: bool
k1: float
k2: float
kernel_size: Union[int, list[int]]
normalize: Optional[Annotated[str]]
sigma: Union[float, list[float]]

class flatiron.torch.config.TorchMetricNormalizedRootMeanSquaredError(**data)[source]

Bases: TorchBaseConfig, TOut

normalization: Annotated[str]

class flatiron.torch.config.TorchMetricPanopticQuality(**data)[source]

Bases: TorchBaseConfig

allow_unknown_preds_category: bool
stuffs: list[int]
things: list[int]

class flatiron.torch.config.TorchMetricPeakSignalNoiseRatio(**data)[source]

Bases: TorchBaseConfig, TMReduct, TDate

base: float
dim: Union[int, tuple[int, ...], None]

class flatiron.torch.config.TorchMetricPearsonCorrCoef(**data)[source]

Bases: TorchBaseConfig, TOut

class flatiron.torch.config.TorchMetricPearsonsContingencyCoefficient(**data)[source]

Bases: TorchBaseConfig, TNanStrategy

nan_replace_value: Optional[float]
num_classes: int

class flatiron.torch.config.TorchMetricPermutationInvariantTraining(**data)[source]

Bases: TorchBaseConfig

eval_func: Annotated[str]
mode: Annotated[str]

class flatiron.torch.config.TorchMetricPerplexity(**data)[source]

Bases: TorchBaseConfig, TInd

class flatiron.torch.config.TorchMetricR2Score(**data)[source]

Bases: TorchBaseConfig

adjusted: int
multioutput: Annotated[str]

class flatiron.torch.config.TorchMetricRelativeAverageSpectralError(**data)[source]

Bases: TorchBaseConfig

window_size: int

class flatiron.torch.config.TorchMetricRelativeSquaredError(**data)[source]

Bases: TorchBaseConfig, TOut

squared: bool

class flatiron.torch.config.TorchMetricRetrievalFallOut(**data)[source]

Bases: TorchBaseConfig, TInd, TTopK

empty_target_action: Annotated[str]

class flatiron.torch.config.TorchMetricRetrievalHitRate(**data)[source]

Bases: TorchBaseConfig, TAct, TInd, TTopK

class flatiron.torch.config.TorchMetricRetrievalMAP(**data)[source]

Bases: TorchBaseConfig, TAct, TInd, TTopK

class flatiron.torch.config.TorchMetricRetrievalMRR(**data)[source]

Bases: TorchBaseConfig, TAct, TInd

class flatiron.torch.config.TorchMetricRetrievalNormalizedDCG(**data)[source]

Bases: TorchBaseConfig, TAct, TInd, TTopK

class flatiron.torch.config.TorchMetricRetrievalPrecision(**data)[source]

Bases: TorchBaseConfig, TAct, TInd, TTopK

adaptive_k: bool

class flatiron.torch.config.TorchMetricRetrievalPrecisionRecallCurve(**data)[source]

Bases: TorchBaseConfig, TInd

adaptive_k: bool
max_k: Optional[int]

class flatiron.torch.config.TorchMetricRetrievalRPrecision(**data)[source]

Bases: TorchBaseConfig, TAct, TInd

class flatiron.torch.config.TorchMetricRetrievalRecall(**data)[source]

Bases: TorchBaseConfig, TAct, TInd, TTopK

class flatiron.torch.config.TorchMetricRetrievalRecallAtFixedPrecision(**data)[source]

Bases: TorchBaseConfig, TAct, TInd

adaptive_k: bool
max_k: Optional[int]
min_precision: float

class flatiron.torch.config.TorchMetricRootMeanSquaredErrorUsingSlidingWindow(**data)[source]

Bases: TorchBaseConfig

window_size: int

class flatiron.torch.config.TorchMetricRunningMean(**data)[source]

Bases: TorchBaseConfig, TNan

window: int

class flatiron.torch.config.TorchMetricRunningSum(**data)[source]

Bases: TorchBaseConfig, TNan

window: int

class flatiron.torch.config.TorchMetricSacreBLEUScore(**data)[source]

Bases: TorchBaseConfig

lowercase: bool
n_gram: int
smooth: bool
tokenize: Annotated[str]
weights: Optional[list[float]]

class flatiron.torch.config.TorchMetricScaleInvariantSignalDistortionRatio(**data)[source]

Bases: TorchBaseConfig

zero_mean: bool

class flatiron.torch.config.TorchMetricSignalDistortionRatio(**data)[source]

Bases: TorchBaseConfig

filter_length: int
load_diag: Optional[float]
use_cg_iter: Optional[int]
zero_mean: bool

class flatiron.torch.config.TorchMetricSignalNoiseRatio(**data)[source]

Bases: TorchBaseConfig

zero_mean: bool

class flatiron.torch.config.TorchMetricSpearmanCorrCoef(**data)[source]

Bases: TorchBaseConfig, TOut

class flatiron.torch.config.TorchMetricSpectralAngleMapper(**data)[source]

Bases: TorchBaseConfig, TMReduct

class flatiron.torch.config.TorchMetricSpectralDistortionIndex(**data)[source]

Bases: TorchBaseConfig, TMReduct

p: int

class flatiron.torch.config.TorchMetricStructuralSimilarityIndexMeasure(**data)[source]

Bases: TorchBaseConfig, TMReduct

data_range: Union[float, tuple[float, float], None]
gaussian_kernel: bool
k1: float
k2: float
kernel_size: Union[int, list[int]]
return_contrast_sensitivity: bool
return_full_image: bool
sigma: Union[float, list[float]]

class flatiron.torch.config.TorchMetricSumMetric(**data)[source]

Bases: TorchBaseConfig, TNan

class flatiron.torch.config.TorchMetricTheilsU(**data)[source]

Bases: TorchBaseConfig, TNanStrategy

nan_replace_value: Optional[float]
num_classes: int

class flatiron.torch.config.TorchMetricTotalVariation(**data)[source]

Bases: TorchBaseConfig

reduction: Annotated[str]

class flatiron.torch.config.TorchMetricTranslationEditRate(**data)[source]

Bases: TorchBaseConfig

asian_support: bool
lowercase: bool
no_punctuation: bool
normalize: bool
return_sentence_level_score: bool

class flatiron.torch.config.TorchMetricTschuprowsT(**data)[source]

Bases: TorchBaseConfig, TNanStrategy

bias_correction: bool
nan_replace_value: Optional[float]
num_classes: int

class flatiron.torch.config.TorchMetricTweedieDevianceScore(**data)[source]

Bases: TorchBaseConfig

power: float

class flatiron.torch.config.TorchMetricUniversalImageQualityIndex(**data)[source]

Bases: TorchBaseConfig, TMReduct

kernel_size: tuple[int, ...]
sigma: tuple[float, ...]

class flatiron.torch.config.TorchOptASGD(**data)[source]

Bases: TorchOptBaseConfig, TCap, TDecay, TDiff, TFor, TMax

alpha: float
lambd: float
t0: float

class flatiron.torch.config.TorchOptAdadelta(**data)[source]

Bases: TorchOptBaseConfig, TGroup1

rho: float

class flatiron.torch.config.TorchOptAdafactor(**data)[source]

Bases: TorchOptBaseConfig, TDecay, TEps, TFor, TMax

beta2_decay: float
clipping_threshold: float

class flatiron.torch.config.TorchOptAdagrad(**data)[source]

Bases: TorchOptBaseConfig, TDecay, TDiff, TEps, TFor, TMax

fused: Optional[bool]
initial_accumulator_value: float
lr_decay: float

class flatiron.torch.config.TorchOptAdam(**data)[source]

Bases: TorchOptBaseConfig, TGroup1, TBeta

amsgrad: bool
fused: Optional[bool]
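
As an example of how the optimizer configs compose their mixins, an Adam config carries name and learning_rate from TorchOptBaseConfig, the shared optimizer flags from TGroup1, and the beta coefficients from TBeta. A sketch (which fields have defaults is an assumption, so all are passed explicitly):

   from flatiron.torch.config import TorchOptAdam

   config = TorchOptAdam(
       name='Adam', learning_rate=1e-3,
       beta_1=0.9, beta_2=0.999, epsilon=1e-8, weight_decay=0.0,
       amsgrad=False, fused=None, capturable=False,
       differentiable=False, foreach=None, maximize=False,
   )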

class flatiron.torch.config.TorchOptAdamW(**data)[source]

Bases: TorchOptBaseConfig, TGroup1, TBeta

amsgrad: bool
fused: Optional[bool]

class flatiron.torch.config.TorchOptAdamax(**data)[source]

Bases: TorchOptBaseConfig, TGroup1, TBeta

class flatiron.torch.config.TorchOptBaseConfig(**data)[source]

Bases: TorchBaseConfig

learning_rate: float

class flatiron.torch.config.TorchOptLBFGS(**data)[source]

Bases: TorchOptBaseConfig

history_size: int
line_search_fn: Optional[str]
max_eval: Optional[int]
max_iter: int
tolerance_change: float
tolerance_grad: float

class flatiron.torch.config.TorchOptNAdam(**data)[source]

Bases: TorchOptBaseConfig, TGroup1, TBeta

momentum_decay: float

class flatiron.torch.config.TorchOptRAdam(**data)[source]

Bases: TorchOptBaseConfig, TGroup1, TBeta

class flatiron.torch.config.TorchOptRMSprop(**data)[source]

Bases: TorchOptBaseConfig, TGroup1

alpha: float
centered: bool
momentum: float

class flatiron.torch.config.TorchOptRprop(**data)[source]

Bases: TorchOptBaseConfig, TCap, TDiff, TFor, TMax

etas: tuple[float, float]
step_sizes: tuple[float, float]

class flatiron.torch.config.TorchOptSGD(**data)[source]

Bases: TorchOptBaseConfig, TDecay, TDiff, TFor, TMax

dampening: float
fused: Optional[bool]
momentum: float
nesterov: bool

class flatiron.torch.config.TorchOptSparseAdam(**data)[source]

Bases: TorchOptBaseConfig, TEps, TMax, TBeta

loss

flatiron.torch.loss.get(config)[source]

Get a loss function from this module.

Parameters:

config (dict) – Loss config.

Returns:

Module function.

Return type:

function
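
For example, assuming the name key selects the loss class and any remaining keys are passed through as keyword arguments (per flatiron.torch.tools.get below):

   import flatiron.torch.loss as fi_loss

   # presumably resolved against this module first, then torch.nn
   loss_func = fi_loss.get(dict(name='MSELoss'))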

metric

flatiron.torch.metric.get(config)[source]

Get a metric function from this module.

Parameters:

config (dict) – Metric config.

Returns:

Module function.

Return type:

function
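
Usage mirrors flatiron.torch.loss.get; a sketch assuming a torchmetrics-style name:

   import flatiron.torch.metric as fi_metric

   # presumably resolved against this module first, then torchmetrics
   metric_func = fi_metric.get(dict(name='MeanSquaredError'))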

optimizer

flatiron.torch.optimizer.get(config, model)[source]

Get an optimizer from this module.

Parameters:
  • config (dict) – Optimizer config.

  • model (Module) – Torch model.

Returns:

Module function.

Return type:

function
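
A sketch; the learning_rate spelling is presumably translated to torch's lr by resolve_config (see tools below), and unspecified optimizer fields are assumed to have defaults:

   import torch.nn as nn
   import flatiron.torch.optimizer as fi_optimizer

   model = nn.Linear(8, 1)
   optimizer = fi_optimizer.get(dict(name='SGD', learning_rate=0.01), model)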

tools

class flatiron.torch.tools.ModelCheckpoint(filepath, save_freq='epoch', **kwargs)[source]

Bases: object

Class for saving PyTorch models.

__init__(filepath, save_freq='epoch', **kwargs)[source]

Constructs a ModelCheckpoint instance.

Parameters:
  • filepath (str or Path) – Filepath pattern.

  • save_freq (str, optional) – Save frequency. Default: epoch.

save(model, epoch)[source]

Save PyTorch model.

Parameters:
  • model (torch.nn.Module) – Model to be saved.

  • epoch (int) – Current epoch.

Return type:

None
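
A usage sketch (the '{epoch}' placeholder in the pattern follows the convention enforced by get_callbacks below; path and extension are illustrative):

   import torch.nn as nn
   from flatiron.torch.tools import ModelCheckpoint

   model = nn.Linear(8, 1)
   checkpoint = ModelCheckpoint('/tmp/models/model_{epoch:03d}.pt')
   checkpoint.save(model, epoch=7)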

class flatiron.torch.tools.TorchDataset(info, ext_regex='npy|exr|png|jpeg|jpg|tiff', calc_file_size=True, labels=None, label_axis=-1)[source]

Bases: Dataset, Dataset

Class that bridges the flatiron Dataset and torch Dataset interfaces.

static monkey_patch(dataset, channels_first=True)[source]

Construct and monkey patch a new TorchDataset instance from a given Dataset. PyTorch expects images with the shape (C, H, W) by default.

Parameters:
  • dataset (Dataset) – Dataset.

  • channels_first (bool, optional) – Will convert any matrix of shape (H, W, C) into (C, H, W). Default: True.

Returns:

TorchDataset instance.

Return type:

TorchDataset
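
A sketch of the intended call (construction of the source Dataset is elided, and the item structure shown is an assumption):

   from flatiron.torch.tools import TorchDataset

   # `dataset` is an existing flatiron Dataset instance (construction elided)
   torch_dataset = TorchDataset.monkey_patch(dataset, channels_first=True)
   x, y = torch_dataset[0]  # x now arrives as (C, H, W)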

flatiron.torch.tools._execute_epoch(epoch, model, data_loader, optimizer, loss_func, device, metrics_funcs=[], writer=None, checkpoint=None, mode='train')[source]

Execute a train or test epoch on a given torch model.

Parameters:
  • epoch (int) – Current epoch.

  • model (torch.nn.Module) – Torch model.

  • data_loader (torch.utils.data.DataLoader) – Torch data loader.

  • optimizer (torch.optim.Optimizer) – Torch optimizer.

  • loss_func (torch.nn.Module) – Torch loss function.

  • device (torch.device) – Torch device.

  • metrics_funcs (list[Callable], optional) – List of torch metrics. Default: [].

  • writer (SummaryWriter, optional) – Tensorboard writer. Default: None.

  • checkpoint (ModelCheckpoint, optional) – Model saver. Default: None.

  • mode (str, optional) – Mode to execute. Options: [train, test]. Default: train.

Return type:

None

flatiron.torch.tools.compile(framework, model, optimizer, loss, metrics)[source]

Call torch.compile on given model with kwargs.

Parameters:
  • framework (dict) – Framework dict.

  • model (Any) – Model to be compiled.

  • optimizer (dict) – Optimizer config for compilation.

  • loss (str) – Loss to be compiled.

  • metrics (list[str]) – Metrics to be compiled.

Returns:

Dict of compiled objects.

Return type:

dict
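
A sketch wiring the pieces documented above together; the exact keys each argument accepts are assumptions based on the config classes in this module:

   import torch.nn as nn
   import flatiron.torch.tools as fi_torchtools

   model = nn.Linear(8, 1)
   compiled = fi_torchtools.compile(
       framework=dict(name='torch', device='cpu'),
       model=model,
       optimizer=dict(name='SGD', learning_rate=0.01),
       loss='MSELoss',
       metrics=['MeanSquaredError'],
   )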

flatiron.torch.tools.get(config, module, fallback_module)[source]

Given a config and a set of modules, return an instance or function.

Parameters:
  • config (dict) – Instance config.

  • module (str) – Always __name__.

  • fallback_module (str) – Fallback module, either a tf or torch module.

Raises:

EnforceError – If config is not a dict with a name key.

Returns:

Instance or function.

Return type:

object
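
A sketch of the lookup contract (the fallback module name here is an assumption):

   import flatiron.torch.tools as fi_torchtools

   # name is looked up in the calling module first, then in the fallback
   loss = fi_torchtools.get(dict(name='MSELoss'), __name__, 'torch.nn')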

flatiron.torch.tools.get_callbacks(log_directory, checkpoint_pattern, checkpoint_params={})[source]

Create a list of callbacks for a Torch model.

Parameters:
  • log_directory (str or Path) – Tensorboard project log directory.

  • checkpoint_pattern (str) – Filepath pattern for checkpoint callback.

  • checkpoint_params (dict, optional) – Params to be passed to checkpoint callback. Default: {}.

Raises:
  • EnforceError – If log directory does not exist.

  • EnforceError – If checkpoint pattern does not contain ‘{epoch}’.

Returns:

Tensorboard and ModelCheckpoint callbacks.

Return type:

list
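
A sketch (the log directory must already exist, and the pattern must contain '{epoch}'):

   import flatiron.torch.tools as fi_torchtools

   callbacks = fi_torchtools.get_callbacks(
       log_directory='/tmp/tensorboard',
       checkpoint_pattern='/tmp/models/model_{epoch:03d}.pt',
   )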

flatiron.torch.tools.pre_build(device)[source]

flatiron.torch.tools.resolve_config(config)[source]

Resolve configs handed to Torch classes. Replaces the following keys:

  • learning_rate

  • epsilon

  • clipping_threshold

  • exponent

  • norm_degree

  • beta_1

  • beta_2

Parameters:

config (dict) – Config dict.

Returns:

Resolved config.

Return type:

dict
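
For instance, an optimizer config written with flatiron's spellings would be rewritten to torch's; the exact output below is an assumption based on the torch.optim.Adam signature (lr, eps, betas):

   import flatiron.torch.tools as fi_torchtools

   config = dict(name='Adam', learning_rate=1e-3, beta_1=0.9, beta_2=0.999)
   resolved = fi_torchtools.resolve_config(config)
   # plausibly: dict(name='Adam', lr=0.001, betas=(0.9, 0.999))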

flatiron.torch.tools.train(compiled, callbacks, train_data, test_data, params)[source]

Train Torch model.

Parameters:
  • compiled (dict) – Compiled objects.

  • callbacks (dict) – Dict of callbacks.

  • train_data (Dataset) – Training dataset.

  • test_data (Dataset) – Test dataset.

  • params (dict) – Training params.

Return type:

None
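
Putting the module together, a run plausibly looks like this end to end (inputs come from the functions above; the params keys are assumptions):

   import flatiron.torch.tools as fi_torchtools

   # framework, model, optimizer, loss, metrics as in compile() above;
   # train_dataset and test_dataset are flatiron Datasets
   compiled = fi_torchtools.compile(framework, model, optimizer, loss, metrics)
   callbacks = fi_torchtools.get_callbacks(log_dir, checkpoint_pattern)
   fi_torchtools.train(
       compiled=compiled,
       callbacks=callbacks,
       train_data=train_dataset,
       test_data=test_dataset,
       params=dict(batch_size=32, epochs=10),  # assumed keys
   )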