diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..37ac849a --- /dev/null +++ b/Dockerfile @@ -0,0 +1,85 @@ +FROM jupyter/base-notebook:latest + +# Install .NET CLI dependencies + +ARG NB_USER=fsdocs-user +ARG NB_UID=1000 +ENV USER ${NB_USER} +ENV NB_UID ${NB_UID} +ENV HOME /home/${NB_USER} + +WORKDIR ${HOME} + +USER root + +ENV \ + # Enable detection of running in a container + DOTNET_RUNNING_IN_CONTAINER=true \ + # Enable correct mode for dotnet watch (only mode supported in a container) + DOTNET_USE_POLLING_FILE_WATCHER=true \ + # Skip extraction of XML docs - generally not useful within an image/container - helps performance + NUGET_XMLDOC_MODE=skip \ + # Opt out of telemetry until after we install jupyter when building the image, this prevents caching of machine id + DOTNET_INTERACTIVE_CLI_TELEMETRY_OPTOUT=true \ + DOTNET_SDK_VERSION=5.0.202 + +# Install .NET CLI dependencies +RUN apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + libc6 \ + libgcc1 \ + libgssapi-krb5-2 \ + libicu66 \ + libssl1.1 \ + libstdc++6 \ + zlib1g \ + curl \ + git \ + && rm -rf /var/lib/apt/lists/* + +# When updating the SDK version, the sha512 value a few lines down must also be updated. 
+# Install .NET SDK +RUN curl -SL --output dotnet.tar.gz https://dotnetcli.azureedge.net/dotnet/Sdk/$DOTNET_SDK_VERSION/dotnet-sdk-$DOTNET_SDK_VERSION-linux-x64.tar.gz \ + && dotnet_sha512='01ed59f236184987405673d24940d55ce29d830e7dbbc19556fdc03893039e6046712de6f901dc9911047a0dee4fd15319b7e94f8a31df6b981fa35bd93d9838' \ + && echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ + && mkdir -p /usr/share/dotnet \ + && tar -ozxf dotnet.tar.gz -C /usr/share/dotnet \ + && rm dotnet.tar.gz \ + && ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet \ + # Trigger first run experience by running arbitrary cmd + && dotnet help + +# Copy notebooks +COPY ./ ${HOME}/notebooks/ + +# Copy package sources +# COPY ./NuGet.config ${HOME}/nuget.config + +RUN chown -R ${NB_UID} ${HOME} +USER ${USER} + +# Clone and build Furnace-cpu bundle to get the latest TorchSharp and libtorch-cpu packages downloaded and cached by nuget within the Docker image +# This the makes user experience faster when running #r "nuget: Furnace-cpu +RUN git clone --depth 1 https://github.com/Furnace/Furnace.git \ + && dotnet build Furnace/bundles/Furnace-cpu + +#Install nteract +RUN pip install nteract_on_jupyter + +# Install lastest build from master branch of Microsoft.DotNet.Interactive +RUN dotnet tool install -g --add-source "https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json" Microsoft.dotnet-interactive + +#latest stable from nuget.org +#RUN dotnet tool install -g Microsoft.dotnet-interactive --add-source "https://api.nuget.org/v3/index.json" + +ENV PATH="${PATH}:${HOME}/.dotnet/tools" +RUN echo "$PATH" + +# Install kernel specs +RUN dotnet interactive jupyter install + +# Enable telemetry once we install jupyter for the image +ENV DOTNET_INTERACTIVE_CLI_TELEMETRY_OPTOUT=false + +# Set root to notebooks +WORKDIR ${HOME}/notebooks/ \ No newline at end of file diff --git a/NuGet.config b/NuGet.config new file mode 100755 index 00000000..cf1ace51 --- /dev/null +++ 
b/NuGet.config @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/README.fsx b/README.fsx new file mode 100644 index 00000000..2e1d7dcc --- /dev/null +++ b/README.fsx @@ -0,0 +1,40 @@ +(** +# Running notebooks in MyBinder + +The `Dockerfile` and `NuGet.config` allow us to run generated notebooks in [MyBinder](https://mybinder.org) + +* `master` branch of fsprojects/furnace: [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/fsprojects/furnace/master) + +# Generating docs + +To iterate on docs (requires evaluation off since DLLs get locked) + + dotnet fsdocs watch + +To use a local build of FSharp.Formatting: + + git clone https://github.com/fsprojects/FSharp.Formatting ../FSharp.Formatting + pushd ..\FSharp.Formatting + .\build + popd + pop + +Then: + + ..\FSharp.Formatting\src\FSharp.Formatting.CommandTool\bin\Debug\net6.0\fsdocs.exe watch + ..\FSharp.Formatting\src\FSharp.Formatting.CommandTool\bin\Debug\net6.0\fsdocs.exe build --clean --eval + +## Generated Notebooks + +Notebooks are generated for all .md and .fsx files under docs as part of the build. + +* Dockerfile - see [https://github.com/dotnet/interactive/blob/master/docs/CreateBinder.md](https://github.com/dotnet/interactive/blob/master/docs/CreateBinder.md) + + +* NuGet.config - likewise + + +See MyBinder for creating URLs + +*) + diff --git a/README.html b/README.html new file mode 100644 index 00000000..7acf8724 --- /dev/null +++ b/README.html @@ -0,0 +1,157 @@ + + + + + Running notebooks in MyBinder + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+

Running notebooks in MyBinder

+

The Dockerfile and NuGet.config allow us to run generated notebooks in MyBinder

+
    +
  • master branch of fsprojects/furnace: Binder
  • +
+

Generating docs

+

To iterate on docs (requires evaluation off since DLLs get locked)

+
dotnet fsdocs watch 
+
+

To use a local build of FSharp.Formatting:

+
   git clone https://github.com/fsprojects/FSharp.Formatting  ../FSharp.Formatting
+   pushd ..\FSharp.Formatting
+   .\build
+   popd
+
+
+

Then:

+
   ..\FSharp.Formatting\src\FSharp.Formatting.CommandTool\bin\Debug\net6.0\fsdocs.exe watch
+   ..\FSharp.Formatting\src\FSharp.Formatting.CommandTool\bin\Debug\net6.0\fsdocs.exe build --clean --eval
+
+

Generated Notebooks

+

Notebooks are generated for all .md and .fsx files under docs as part of the build.

+ +

See MyBinder for creating URLs

+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/README.ipynb b/README.ipynb new file mode 100644 index 00000000..389b6a37 --- /dev/null +++ b/README.ipynb @@ -0,0 +1,60 @@ + + { + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["# Running notebooks in MyBinder\n", +"\n", +"The `Dockerfile` and `NuGet.config` allow us to run generated notebooks in [MyBinder](https://mybinder.org)\n", +"\n", +"* `master` branch of fsprojects/furnace: [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/fsprojects/furnace/master)\n", +"\n", +"# Generating docs\n", +"\n", +"To iterate on docs (requires evaluation off since DLLs get locked)\n", +"\n", +" dotnet fsdocs watch \n", +"\n", +"To use a local build of FSharp.Formatting:\n", +"\n", +" git clone https://github.com/fsprojects/FSharp.Formatting ../FSharp.Formatting\n", +" pushd ..\\FSharp.Formatting\n", +" .\\build\n", +" popd\n", +" pop\n", +"\n", +"Then:\n", +"\n", +" ..\\FSharp.Formatting\\src\\FSharp.Formatting.CommandTool\\bin\\Debug\\net6.0\\fsdocs.exe watch\n", +" ..\\FSharp.Formatting\\src\\FSharp.Formatting.CommandTool\\bin\\Debug\\net6.0\\fsdocs.exe build --clean --eval\n", +"\n", +"## Generated Notebooks\n", +"\n", +"Notebooks are generated for all .md and .fsx files under docs as part of the build.\n", +"\n", +"* Dockerfile - see [https://github.com/dotnet/interactive/blob/master/docs/CreateBinder.md](https://github.com/dotnet/interactive/blob/master/docs/CreateBinder.md)\n", +" \n", +"\n", +"* NuGet.config - likewise\n", +" \n", +"\n", +"See MyBinder for creating URLs\n", +"\n"] + }], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/content/fsdocs-custom.css b/content/fsdocs-custom.css new file 
mode 100644 index 00000000..c32465db --- /dev/null +++ b/content/fsdocs-custom.css @@ -0,0 +1,5 @@ + +/*-------------------------------------------------------------------------- + Customize your CSS here +/*--------------------------------------------------------------------------*/ + diff --git a/content/fsdocs-default.css b/content/fsdocs-default.css new file mode 100644 index 00000000..0e621250 --- /dev/null +++ b/content/fsdocs-default.css @@ -0,0 +1,634 @@ +/* @import url(https://fonts.googleapis.com/css?family=Droid+Sans|Droid+Sans+Mono|Open+Sans:400,600,700); */ +/* @import url('https://fonts.googleapis.com/css2?family=Hind+Vadodara&display=swap'); */ +@import url('https://fonts.googleapis.com/css2?family=Hind+Vadodara&family=Roboto+Mono&display=swap'); +/*-------------------------------------------------------------------------- + Formatting for page & standard document content +/*--------------------------------------------------------------------------*/ + +body { + /* font-family: 'Open Sans', serif; */ + /* font-family: Roboto, Helvetica Neue, Helvetica, Arial, sans-serif; */ + font-family: 'Hind Vadodara', sans-serif; + /* padding-top: 0px; + padding-bottom: 40px; +*/ +} + +/* Format the heading - nicer spacing etc. */ +.masthead { + overflow: hidden; +} + + .masthead .muted a { + text-decoration: none; + color: #999999; + } + + .masthead ul, .masthead li { + margin-bottom: 0px; + } + + .masthead .nav li { + margin-top: 15px; + font-size: 110%; + } + + .masthead h3 { + margin-top: 15px; + margin-bottom: 5px; + font-size: 170%; + } + +/*-------------------------------------------------------------------------- + Formatting fsdocs-content +/*--------------------------------------------------------------------------*/ + +/* Change font sizes for headings etc. 
*/ +#fsdocs-content h1 { + margin: 30px 0px 15px 0px; + /* font-weight: 400; */ + font-size: 2rem; + letter-spacing: 1.78px; + line-height: 2.5rem; + text-transform: uppercase; + font-weight: 400; +} + +#fsdocs-content h2 { + font-size: 1.6rem; + margin: 20px 0px 10px 0px; + font-weight: 400; +} + +#fsdocs-content h3 { + font-size: 1.2rem; + margin: 15px 0px 10px 0px; + font-weight: 400; +} + +#fsdocs-content hr { + margin: 0px 0px 20px 0px; +} + +#fsdocs-content li { + font-size: 1.0rem; + line-height: 1.375rem; + letter-spacing: 0.01px; + font-weight: 500; + margin: 0px 0px 15px 0px; +} + +#fsdocs-content p { + font-size: 1.0rem; + line-height: 1.375rem; + letter-spacing: 0.01px; + font-weight: 500; + color: #262626; +} + +#fsdocs-content a { + color: #4974D1; +} +/* remove the default bootstrap bold on dt elements */ +#fsdocs-content dt { + font-weight: normal; +} + + + +/*-------------------------------------------------------------------------- + Formatting tables in fsdocs-content, using docs.microsoft.com tables +/*--------------------------------------------------------------------------*/ + +#fsdocs-content .table { + table-layout: auto; + width: 100%; + font-size: 0.875rem; +} + + #fsdocs-content .table caption { + font-size: 0.8rem; + font-weight: 600; + letter-spacing: 2px; + text-transform: uppercase; + padding: 1.125rem; + border-width: 0 0 1px; + border-style: solid; + border-color: #e3e3e3; + text-align: right; + } + + #fsdocs-content .table td, + #fsdocs-content .table th { + display: table-cell; + word-wrap: break-word; + padding: 0.75rem 1rem 0.75rem 0rem; + line-height: 1.5; + vertical-align: top; + border-top: 1px solid #e3e3e3; + border-right: 0; + border-left: 0; + border-bottom: 0; + border-style: solid; + } + + /* suppress the top line on inner lists such as tables of exceptions */ + #fsdocs-content .table .fsdocs-exception-list td, + #fsdocs-content .table .fsdocs-exception-list th { + border-top: 0 + } + + #fsdocs-content .table td 
p:first-child, + #fsdocs-content .table th p:first-child { + margin-top: 0; + } + + #fsdocs-content .table td.nowrap, + #fsdocs-content .table th.nowrap { + white-space: nowrap; + } + + #fsdocs-content .table td.is-narrow, + #fsdocs-content .table th.is-narrow { + width: 15%; + } + + #fsdocs-content .table th:not([scope='row']) { + border-top: 0; + border-bottom: 1px; + } + + #fsdocs-content .table > caption + thead > tr:first-child > td, + #fsdocs-content .table > colgroup + thead > tr:first-child > td, + #fsdocs-content .table > thead:first-child > tr:first-child > td { + border-top: 0; + } + + #fsdocs-content .table table-striped > tbody > tr:nth-of-type(odd) { + background-color: var(--box-shadow-light); + } + + #fsdocs-content .table.min { + width: unset; + } + + #fsdocs-content .table.is-left-aligned td:first-child, + #fsdocs-content .table.is-left-aligned th:first-child { + padding-left: 0; + } + + #fsdocs-content .table.is-left-aligned td:first-child a, + #fsdocs-content .table.is-left-aligned th:first-child a { + outline-offset: -0.125rem; + } + +@media screen and (max-width: 767px), screen and (min-resolution: 120dpi) and (max-width: 767.9px) { + #fsdocs-content .table.is-stacked-mobile td:nth-child(1) { + display: block; + width: 100%; + padding: 1rem 0; + } + + #fsdocs-content .table.is-stacked-mobile td:not(:nth-child(1)) { + display: block; + border-width: 0; + padding: 0 0 1rem; + } +} + +#fsdocs-content .table.has-inner-borders th, +#fsdocs-content .table.has-inner-borders td { + border-right: 1px solid #e3e3e3; +} + + #fsdocs-content .table.has-inner-borders th:last-child, + #fsdocs-content .table.has-inner-borders td:last-child { + border-right: none; + } + +.fsdocs-entity-list .fsdocs-entity-name { + width: 25%; + font-weight: bold; +} + +.fsdocs-member-list .fsdocs-member-usage { + width: 35%; +} + +/*-------------------------------------------------------------------------- + Formatting xmldoc sections in fsdocs-content 
+/*--------------------------------------------------------------------------*/ + +.fsdocs-xmldoc, .fsdocs-entity-xmldoc, .fsdocs-member-xmldoc { + font-size: 1.0rem; + line-height: 1.375rem; + letter-spacing: 0.01px; + font-weight: 500; + color: #262626; +} + +.fsdocs-xmldoc h1 { + font-size: 1.2rem; + margin: 10px 0px 0px 0px; +} + +.fsdocs-xmldoc h2 { + font-size: 1.2rem; + margin: 10px 0px 0px 0px; +} + +.fsdocs-xmldoc h3 { + font-size: 1.1rem; + margin: 10px 0px 0px 0px; +} + +/* #fsdocs-nav .searchbox { + margin-top: 30px; + margin-bottom: 30px; +} */ + +#fsdocs-nav img.logo{ + width:90%; + /* height:140px; */ + /* margin:10px 0px 0px 20px; */ + margin-top:40px; + border-style:none; +} + +#fsdocs-nav input{ + /* margin-left: 20px; */ + margin-right: 20px; + margin-top: 20px; + margin-bottom: 20px; + width: 93%; + -webkit-border-radius: 0; + border-radius: 0; +} + +#fsdocs-nav { + /* margin-left: -5px; */ + /* width: 90%; */ + font-size:0.95rem; +} + +#fsdocs-nav li.nav-header{ + /* margin-left: -5px; */ + /* width: 90%; */ + padding-left: 0; + color: #262626; + text-transform: none; + font-size:16px; +} + +#fsdocs-nav a{ + padding-left: 0; + color: #6c6c6d; + /* margin-left: 5px; */ + /* width: 90%; */ +} + +/*-------------------------------------------------------------------------- + Formatting pre and code sections in fsdocs-content (code highlighting is + further below) +/*--------------------------------------------------------------------------*/ + +#fsdocs-content code { + /* font-size: 0.83rem; */ + font: 0.85rem 'Roboto Mono', monospace; + background-color: #f7f7f900; + border: 0px; + padding: 0px; + /* word-wrap: break-word; */ + /* white-space: pre; */ +} + +/* omitted */ +#fsdocs-content span.omitted { + background: #3c4e52; + border-radius: 5px; + color: #808080; + padding: 0px 0px 1px 0px; +} + +#fsdocs-content pre .fssnip code { + font: 0.86rem 'Roboto Mono', monospace; +} + +#fsdocs-content table.pre, +#fsdocs-content pre.fssnip, 
+#fsdocs-content pre { + line-height: 13pt; + border: 0px solid #d8d8d8; + border-top: 0px solid #e3e3e3; + border-collapse: separate; + white-space: pre; + font: 0.86rem 'Roboto Mono', monospace; + width: 100%; + margin: 10px 0px 20px 0px; + background-color: #f3f4f7; + padding: 10px; + border-radius: 5px; + color: #8e0e2b; + max-width: none; + box-sizing: border-box; +} + +#fsdocs-content pre.fssnip code { + font: 0.86rem 'Roboto Mono', monospace; + font-weight: 600; +} + +#fsdocs-content table.pre { + background-color: #fff7ed; +} + +#fsdocs-content table.pre pre { + padding: 0px; + margin: 0px; + border-radius: 0px; + width: 100%; + background-color: #fff7ed; + color: #837b79; +} + +#fsdocs-content table.pre td { + padding: 0px; + white-space: normal; + margin: 0px; + width: 100%; +} + +#fsdocs-content table.pre td.lines { + width: 30px; +} + + +#fsdocs-content pre { + word-wrap: inherit; +} + +.fsdocs-example-header { + font-size: 1.0rem; + line-height: 1.375rem; + letter-spacing: 0.01px; + font-weight: 700; + color: #262626; +} + +/*-------------------------------------------------------------------------- + Formatting github source links +/*--------------------------------------------------------------------------*/ + +.fsdocs-source-link { + float: right; + text-decoration: none; +} + + .fsdocs-source-link img { + border-style: none; + margin-left: 10px; + width: auto; + height: 1.4em; + } + + .fsdocs-source-link .hover { + display: none; + } + + .fsdocs-source-link:hover .hover { + display: block; + } + + .fsdocs-source-link .normal { + display: block; + } + + .fsdocs-source-link:hover .normal { + display: none; + } + +/*-------------------------------------------------------------------------- + Formatting logo +/*--------------------------------------------------------------------------*/ + +#fsdocs-logo { + width:140px; + height:140px; + margin:10px 0px 0px 0px; + border-style:none; +} + 
+/*-------------------------------------------------------------------------- + +/*--------------------------------------------------------------------------*/ + +#fsdocs-content table.pre pre { + padding: 0px; + margin: 0px; + border: none; +} + +/*-------------------------------------------------------------------------- + Remove formatting from links +/*--------------------------------------------------------------------------*/ + +#fsdocs-content h1 a, +#fsdocs-content h1 a:hover, +#fsdocs-content h1 a:focus, +#fsdocs-content h2 a, +#fsdocs-content h2 a:hover, +#fsdocs-content h2 a:focus, +#fsdocs-content h3 a, +#fsdocs-content h3 a:hover, +#fsdocs-content h3 a:focus, +#fsdocs-content h4 a, +#fsdocs-content h4 a:hover, #fsdocs-content +#fsdocs-content h4 a:focus, +#fsdocs-content h5 a, +#fsdocs-content h5 a:hover, +#fsdocs-content h5 a:focus, +#fsdocs-content h6 a, +#fsdocs-content h6 a:hover, +#fsdocs-content h6 a:focus { + color: #262626; + text-decoration: none; + text-decoration-style: none; + /* outline: none */ +} + +/*-------------------------------------------------------------------------- + Formatting for F# code snippets +/*--------------------------------------------------------------------------*/ + +.fsdocs-param-name, +.fsdocs-return-name, +.fsdocs-param { + font-weight: 900; + font-size: 0.85rem; + font-family: 'Roboto Mono', monospace; +} +/* strings --- and stlyes for other string related formats */ +#fsdocs-content span.s { + color: #dd1144; +} +/* printf formatters */ +#fsdocs-content span.pf { + color: #E0C57F; +} +/* escaped chars */ +#fsdocs-content span.e { + color: #EA8675; +} + +/* identifiers --- and styles for more specific identifier types */ +#fsdocs-content span.id { + color: #262626; +} +/* module */ +#fsdocs-content span.m { + color: #009999; +} +/* reference type */ +#fsdocs-content span.rt { + color: #4974D1; +} +/* value type */ +#fsdocs-content span.vt { + color: #43AEC6; +} +/* interface */ +#fsdocs-content span.if { + 
color: #43AEC6; +} +/* type argument */ +#fsdocs-content span.ta { + color: #43AEC6; +} +/* disposable */ +#fsdocs-content span.d { + color: #43AEC6; +} +/* property */ +#fsdocs-content span.prop { + color: #43AEC6; +} +/* punctuation */ +#fsdocs-content span.p { + color: #43AEC6; +} +#fsdocs-content span.pn { + color: #262626; +} +/* function */ +#fsdocs-content span.f { + color: #e1e1e1; +} +#fsdocs-content span.fn { + color: #990000; +} +/* active pattern */ +#fsdocs-content span.pat { + color: #4ec9b0; +} +/* union case */ +#fsdocs-content span.u { + color: #4ec9b0; +} +/* enumeration */ +#fsdocs-content span.e { + color: #4ec9b0; +} +/* keywords */ +#fsdocs-content span.k { + color: #b68015; + /* font-weight: bold; */ +} +/* comment */ +#fsdocs-content span.c { + color: #808080; + font-weight: 400; + font-style: italic; +} +/* operators */ +#fsdocs-content span.o { + color: #af75c1; +} +/* numbers */ +#fsdocs-content span.n { + color: #009999; +} +/* line number */ +#fsdocs-content span.l { + color: #80b0b0; +} +/* mutable var or ref cell */ +#fsdocs-content span.v { + color: #d1d1d1; + font-weight: bold; +} +/* inactive code */ +#fsdocs-content span.inactive { + color: #808080; +} +/* preprocessor */ +#fsdocs-content span.prep { + color: #af75c1; +} +/* fsi output */ +#fsdocs-content span.fsi { + color: #808080; +} + +/* tool tip */ +div.fsdocs-tip { + background: #475b5f; + border-radius: 4px; + font: 0.85rem 'Roboto Mono', monospace; + padding: 6px 8px 6px 8px; + display: none; + color: #d1d1d1; + pointer-events: none; +} + + div.fsdocs-tip code { + color: #d1d1d1; + font: 0.85rem 'Roboto Mono', monospace; + } + +.button { +display: inline-block; +border-radius: 2px; +background-color: #262626; +border: none; +color: #FFFFFF; +text-align: center; +font-size: 18px; +padding: 20px; +width: 120px; +transition: all 0.5s; +cursor: pointer; +/* margin: 5px; */ +opacity: 0.85; +} + +.button span { +cursor: pointer; +display: inline-block; +position: relative; 
+transition: 0.25s; +} + +.button:hover { + opacity: 1; + background-color: #8e0e2b; +} + +.button:hover span { +padding-left: 5px; +/* opacity: 1; */ +} + +.button:hover span:after { +opacity: 1; +/* right: 0; */ +} diff --git a/content/fsdocs-search.js b/content/fsdocs-search.js new file mode 100755 index 00000000..3d543cf3 --- /dev/null +++ b/content/fsdocs-search.js @@ -0,0 +1,84 @@ +var lunrIndex, pagesIndex; + +function endsWith(str, suffix) { + return str.indexOf(suffix, str.length - suffix.length) !== -1; +} + +// Initialize lunrjs using our generated index file +function initLunr() { + if (!endsWith(fsdocs_search_baseurl,"/")){ + fsdocs_search_baseurl = fsdocs_search_baseurl+'/' + }; + + // First retrieve the index file + $.getJSON(fsdocs_search_baseurl +"index.json") + .done(function(index) { + pagesIndex = index; + // Set up lunrjs by declaring the fields we use + // Also provide their boost level for the ranking + lunrIndex = lunr(function() { + this.ref("uri"); + this.field('title', { + boost: 15 + }); + this.field('tags', { + boost: 10 + }); + this.field("content", { + boost: 5 + }); + + this.pipeline.remove(lunr.stemmer); + this.searchPipeline.remove(lunr.stemmer); + + // Feed lunr with each file and let lunr actually index them + pagesIndex.forEach(function(page) { + this.add(page); + }, this); + }) + }) + .fail(function(jqxhr, textStatus, error) { + var err = textStatus + ", " + error; + console.error("Error getting Hugo index file:", err); + }); +} + +/** + * Trigger a search in lunr and transform the result + * + * @param {String} query + * @return {Array} results + */ +function search(queryTerm) { + // Find the item in our index corresponding to the lunr one to have more info + return lunrIndex.search(queryTerm+"^100"+" "+queryTerm+"*^10"+" "+"*"+queryTerm+"^10"+" "+queryTerm+"~2^1").map(function(result) { + return pagesIndex.filter(function(page) { + return page.uri === result.ref; + })[0]; + }); +} + +// Let's get started +initLunr(); + +$( 
document ).ready(function() { + var searchList = new autoComplete({ + /* selector for the search box element */ + minChars: 1, + selector: $("#search-by").get(0), + /* source is the callback to perform the search */ + source: function(term, response) { + response(search(term)); + }, + /* renderItem displays individual search results */ + renderItem: function(item, search) { + search = search.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&'); + var re = new RegExp("(" + search.split(' ').join('|') + ")", "gi"); + return '
' + item.title.replace(re, "$1") + '
'; + }, + /* onSelect callback fires when a search suggestion is chosen */ + onSelect: function(e, term, item) { + location.href = item.getAttribute('data-uri'); + } + }); +}); diff --git a/content/fsdocs-tips.js b/content/fsdocs-tips.js new file mode 100755 index 00000000..bcd04cb1 --- /dev/null +++ b/content/fsdocs-tips.js @@ -0,0 +1,54 @@ +var currentTip = null; +var currentTipElement = null; + +function hideTip(evt, name, unique) { + var el = document.getElementById(name); + el.style.display = "none"; + currentTip = null; +} + +function findPos(obj) { + // no idea why, but it behaves differently in webbrowser component + if (window.location.search == "?inapp") + return [obj.offsetLeft + 10, obj.offsetTop + 30]; + + var curleft = 0; + var curtop = obj.offsetHeight; + while (obj) { + curleft += obj.offsetLeft; + curtop += obj.offsetTop; + obj = obj.offsetParent; + }; + return [curleft, curtop]; +} + +function hideUsingEsc(e) { + if (!e) { e = event; } + hideTip(e, currentTipElement, currentTip); +} + +function showTip(evt, name, unique, owner) { + document.onkeydown = hideUsingEsc; + if (currentTip == unique) return; + currentTip = unique; + currentTipElement = name; + + var pos = findPos(owner ? owner : (evt.srcElement ? evt.srcElement : evt.target)); + var posx = pos[0]; + var posy = pos[1]; + + var el = document.getElementById(name); + var parent = (document.documentElement == null) ? 
document.body : document.documentElement; + el.style.position = "absolute"; + el.style.left = posx + "px"; + el.style.top = posy + "px"; + el.style.display = "block"; +} +function Clipboard_CopyTo(value) { + var tempInput = document.createElement("input"); + tempInput.value = value; + document.body.appendChild(tempInput); + tempInput.select(); + document.execCommand("copy"); + document.body.removeChild(tempInput); +} \ No newline at end of file diff --git a/content/img/copy-blue.png b/content/img/copy-blue.png new file mode 100644 index 00000000..60fea167 Binary files /dev/null and b/content/img/copy-blue.png differ diff --git a/content/img/copy-md-blue.png b/content/img/copy-md-blue.png new file mode 100644 index 00000000..b14e9412 Binary files /dev/null and b/content/img/copy-md-blue.png differ diff --git a/content/img/copy-md-hover.png b/content/img/copy-md-hover.png new file mode 100755 index 00000000..b14e9412 Binary files /dev/null and b/content/img/copy-md-hover.png differ diff --git a/content/img/copy-md.png b/content/img/copy-md.png new file mode 100644 index 00000000..72de7381 Binary files /dev/null and b/content/img/copy-md.png differ diff --git a/content/img/copy-xml-hover.png b/content/img/copy-xml-hover.png new file mode 100755 index 00000000..60fea167 Binary files /dev/null and b/content/img/copy-xml-hover.png differ diff --git a/content/img/copy-xml.png b/content/img/copy-xml.png new file mode 100755 index 00000000..e5606b90 Binary files /dev/null and b/content/img/copy-xml.png differ diff --git a/content/img/copy.png b/content/img/copy.png new file mode 100644 index 00000000..e5606b90 Binary files /dev/null and b/content/img/copy.png differ diff --git a/content/img/github-blue.png b/content/img/github-blue.png new file mode 100644 index 00000000..65971d4d Binary files /dev/null and b/content/img/github-blue.png differ diff --git a/content/img/github-hover.png b/content/img/github-hover.png new file mode 100755 index 00000000..65971d4d Binary files 
/dev/null and b/content/img/github-hover.png differ diff --git a/content/img/github.png b/content/img/github.png new file mode 100644 index 00000000..ff34f354 Binary files /dev/null and b/content/img/github.png differ diff --git a/content/navbar-fixed-left.css b/content/navbar-fixed-left.css new file mode 100755 index 00000000..2de6255a --- /dev/null +++ b/content/navbar-fixed-left.css @@ -0,0 +1,77 @@ +body { + padding-top: 90px; +} + +@media (min-width: 768px) { + body { + padding-top: 0; + } +} + +@media (min-width: 768px) { + body { + margin-left: 252px; + } +} +.navbar { + overflow-y: auto; + overflow-x: hidden; + box-shadow: none; +} +.navbar.fixed-left { + position: fixed; + top: 0; + left: 0; + right: 0; + z-index: 1030; +} +.navbar-nav .nav-link { + padding-top: 0.3rem; + padding-bottom: 0.3rem; +} + +@media (min-width: 768px) { + .navbar.fixed-left { + bottom: 0; + width: 252px; + flex-flow: column nowrap; + align-items: flex-start; + } + + .navbar.fixed-left .navbar-collapse { + flex-grow: 0; + flex-direction: column; + width: 100%; + } + + .navbar.fixed-left .navbar-collapse .navbar-nav { + flex-direction: column; + width: 100%; + } + + .navbar.fixed-left .navbar-collapse .navbar-nav .nav-item { + width: 100%; + } + + .navbar.fixed-left .navbar-collapse .navbar-nav .nav-item .dropdown-menu { + top: 0; + } +} + +@media (min-width: 768px) { + .navbar.fixed-left { + right: auto; + } + + .navbar.fixed-left .navbar-nav .nav-item .dropdown-toggle:after { + border-top: 0.3em solid transparent; + border-left: 0.3em solid; + border-bottom: 0.3em solid transparent; + border-right: none; + vertical-align: baseline; + } + + .navbar.fixed-left .navbar-nav .nav-item .dropdown-menu { + left: 100%; + } +} diff --git a/content/navbar-fixed-right.css b/content/navbar-fixed-right.css new file mode 100755 index 00000000..ad6cef83 --- /dev/null +++ b/content/navbar-fixed-right.css @@ -0,0 +1,78 @@ +body { + padding-top: 90px; +} + +@media (min-width: 768px) { + body { + 
padding-top: 0; + } +} + +@media (min-width: 768px) { + body { + margin-right: 252px; + } +} + +.navbar { + overflow-y: auto; + overflow-x: hidden; + box-shadow: none; +} +.navbar.fixed-right { + position: fixed; + top: 0; + left: 0; + right: 0; + z-index: 1030; +} +.navbar-nav .nav-link { + padding-top: 0.3rem; + padding-bottom: 0.3rem; +} +@media (min-width: 768px) { + .navbar.fixed-right { + bottom: 0; + width: 252px; + flex-flow: column nowrap; + align-items: flex-start; + } + + .navbar.fixed-right .navbar-collapse { + flex-grow: 0; + flex-direction: column; + width: 100%; + } + + .navbar.fixed-right .navbar-collapse .navbar-nav { + flex-direction: column; + width: 100%; + } + + .navbar.fixed-right .navbar-collapse .navbar-nav .nav-item { + width: 100%; + } + + .navbar.fixed-right .navbar-collapse .navbar-nav .nav-item .dropdown-menu { + top: 0; + } +} + +@media (min-width: 768px) { + .navbar.fixed-right { + left: auto; + } + + .navbar.fixed-right .navbar-nav .nav-item .dropdown-toggle:after { + border-top: 0.3em solid transparent; + border-left: none; + border-bottom: 0.3em solid transparent; + border-right: 0.3em solid; + vertical-align: baseline; + } + + .navbar.fixed-right .navbar-nav .nav-item .dropdown-menu { + left: auto; + right: 100%; + } +} diff --git a/differentiable-programming.fsx b/differentiable-programming.fsx new file mode 100644 index 00000000..c42a6cdd --- /dev/null +++ b/differentiable-programming.fsx @@ -0,0 +1,4 @@ +// PyTorch style + +// Furnace style + diff --git a/differentiable-programming.html b/differentiable-programming.html new file mode 100644 index 00000000..0f07c035 --- /dev/null +++ b/differentiable-programming.html @@ -0,0 +1,131 @@ + + + + + differentiable-programming + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
// PyTorch style
+
+// Furnace style
+
+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/differentiable-programming.ipynb b/differentiable-programming.ipynb new file mode 100644 index 00000000..0d6e714f --- /dev/null +++ b/differentiable-programming.ipynb @@ -0,0 +1,26 @@ + + { + "cells": [ + { + "cell_type": "code", + "metadata": {}, + "execution_count": 1, "outputs": [], + "source": ["// PyTorch style\n", +"\n", +"// Furnace style\n"] + }], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/extensions.fsx b/extensions.fsx new file mode 100644 index 00000000..52ee772d --- /dev/null +++ b/extensions.fsx @@ -0,0 +1,107 @@ +#r "nuget: Furnace-lite,1.0.8" +(** +[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/extensions.ipynb)  +[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath=extensions.ipynb)  +[![Script](img/badge-script.svg)](extensions.fsx)  +[![Script](img/badge-notebook.svg)](extensions.ipynb) + +# Extending Furnace + +Furnace provides most of the essential operations found in tensor libraries such as [NumPy](https://numpy.org/), [PyTorch](https://pytorch.org/), and [TensorFlow](https://www.tensorflow.org/). All differentiable operations support the forward, reverse, and nested differentiation modes. + +When implementing new operations, you should prefer to implement these as compositions of existing Furnace [Tensor](https://fsprojects.github.io/Furnace/reference/furnace-tensor.html) operations, which would give you differentiability out of the box. 
+ +In the rare cases where you need to extend Furnace with a completely new differentiable operation that cannot be implemented as a composition of existing operations, you can use the provided extension API. + +## Simple elementwise functions + +If the function you would like to implement is a simple elementwise function, you can use the [UnaryOpElementwise](https://fsprojects.github.io/Furnace/reference/furnace-unaryopelementwise.html) or [BinaryOpElementwise](https://fsprojects.github.io/Furnace/reference/furnace-binaryopelementwise.html) types to define your function and its derivatives. The forward, reverse, and nested differentiation rules for the function are automatically generated by the type. The documentation of these two types detail how they should be instantiated. + +Let's see several examples. + +$f(a) = \mathrm{sin}(a)$, with derivative $\frac{\partial f(a)}{\partial a} = \mathrm{cos}(a) \;$. + +*) +open Furnace + +type Tensor with + member a.sin() = + Tensor.Op + { new UnaryOpElementwise("sin") with + member _.fRaw(a) = a.SinT() + member _.dfda(a,f) = a.cos() + } + (a) +(** +$f(a) = \mathrm{log}(a)$, with derivative $\frac{\partial f(a)}{\partial a} = 1/a \;$. + +*) +type Tensor with + member a.log() = + Tensor.Op + { new UnaryOpElementwise("log") with + member _.fRaw(a) = a.LogT() + member _.dfda(a,f) = 1/a + } + (a) +(** +$f(a, b) = ab$, with derivatives $\frac{\partial f(a, b)}{\partial a} = b$, $\frac{\partial f(a, b)}{\partial b} = a \;$. + +*) +type Tensor with + member a.mul(b) = + Tensor.Op + { new BinaryOpElementwise("mul") with + member _.fRaw(a,b) = a.MulTT(b) + member _.dfda(a,b,f) = b + member _.dfdb(a,b,f) = a + } + (a,b) +(** +$f(a, b) = a^b$, with derivatives $\frac{\partial f(a, b)}{\partial a} = b a^{b-1}$, $\frac{\partial f(a, b)}{\partial b} = a^b \mathrm{log}(a) \;$. 
Note the use of the argument `f` in the derivative definitions that makes use of the pre-computed value of $f(a, b) = a^b$ that is available to the derivative implementation. + +*) +type Tensor with + member a.pow(b) = + Tensor.Op + { new BinaryOpElementwise("pow") with + member _.fRaw(a,b) = a.PowTT(b) + member _.dfda(a,b,f) = b * f / a // equivalent to b * a.pow(b-1) + member _.dfdb(a,b,f) = f * a.log() // equivalent to a.pow(b) * a.log() + } + (a,b) +(** +## General functions + +For more complicated functions, you can use the most general way of defining functions using the [UnaryOp](https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html) or [BinaryOp](https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html) types, which allow you to define the full forward and reverse mode differentiation rules. The documentation of these two types detail how they should be instantiated. + +Let's see several examples. + +$f(A) = A^{\intercal}$, with the forward derivative propagation rule $\frac{\partial f(A)}{\partial X} = \frac{\partial A}{\partial X} \frac{\partial f(A)}{\partial A} = (\frac{\partial A}{\partial X})^{\intercal}$ and the reverse derivative propagation rule $\frac{\partial Y}{\partial A} = \frac{\partial Y}{\partial f(A)} \frac{\partial f(A)}{\partial A} = (\frac{\partial Y}{\partial f(A)})^{\intercal} \;$. 
+ +*) +type Tensor with + member a.transpose() = + Tensor.Op + { new UnaryOp("transpose") with + member _.fRaw(a) = a.TransposeT2() + member _.ad_dfda(a,ad,f) = ad.transpose() + member _.fd_dfda(a,f,fd) = fd.transpose() + } + (a) +(** +$f(A, B) = AB$, with the forward derivative propagation rule $\frac{\partial(A, B)}{\partial X} = \frac{\partial A}{\partial X} \frac{\partial f(A, B)}{\partial A} + \frac{\partial B}{\partial X} \frac{\partial f(A, B)}{\partial B} = \frac{\partial A}{\partial X} B + A \frac{\partial B}{\partial X}$ and the reverse propagation rule $\frac{\partial Y}{\partial A} = \frac{\partial Y}{\partial f(A, B)} \frac{\partial f(A, B)}{\partial A} = \frac{\partial Y}{\partial f(A, B)} B^{\intercal}$, $\frac{\partial Y}{\partial B} = \frac{\partial Y}{\partial f(A, B)} \frac{\partial f(A, B)}{B} = A^{\intercal} \frac{\partial Y}{\partial f(A, B)} \;$. + +*) +type Tensor with + member a.matmul(b) = + Tensor.Op + { new BinaryOp("matmul") with + member _.fRaw(a,b) = a.MatMulTT(b) + member _.ad_dfda(a,ad,b,f) = ad.matmul(b) + member _.bd_dfdb(a,b,bd,f) = a.matmul(bd) + member _.fd_dfda(a,b,f,fd) = fd.matmul(b.transpose()) + member _.fd_dfdb(a,b,f,fd) = a.transpose().matmul(fd) + } + (a,b) + diff --git a/extensions.html b/extensions.html new file mode 100644 index 00000000..c0418e6e --- /dev/null +++ b/extensions.html @@ -0,0 +1,351 @@ + + + + + Extending Furnace + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+

Binder  +Binder  +Script  +Script

+

Extending Furnace

+

Furnace provides most of the essential operations found in tensor libraries such as NumPy, PyTorch, and TensorFlow. All differentiable operations support the forward, reverse, and nested differentiation modes.

+

When implementing new operations, you should prefer to implement these as compositions of existing Furnace Tensor operations, which would give you differentiability out of the box.

+

In the rare cases where you need to extend Furnace with a completely new differentiable operation that cannot be implemented as a composition of existing operations, you can use the provided extension API.

+

Simple elementwise functions

+

If the function you would like to implement is a simple elementwise function, you can use the UnaryOpElementwise or BinaryOpElementwise types to define your function and its derivatives. The forward, reverse, and nested differentiation rules for the function are automatically generated by the type. The documentation of these two types detail how they should be instantiated.

+

Let's see several examples.

+

\(f(a) = \mathrm{sin}(a)\), with derivative \(\frac{\partial f(a)}{\partial a} = \mathrm{cos}(a) \;\).

+
open Furnace
+
+type Tensor with
+    member a.sin() = 
+        Tensor.Op
+            { new UnaryOpElementwise("sin") with 
+                member _.fRaw(a) = a.SinT()
+                member _.dfda(a,f) = a.cos()
+            }
+            (a)
+
+

\(f(a) = \mathrm{log}(a)\), with derivative \(\frac{\partial f(a)}{\partial a} = 1/a \;\).

+
type Tensor with
+    member a.log() =
+        Tensor.Op
+            { new UnaryOpElementwise("log") with
+                member _.fRaw(a) = a.LogT()
+                member _.dfda(a,f) = 1/a
+            }
+            (a)
+
+

\(f(a, b) = ab\), with derivatives \(\frac{\partial f(a, b)}{\partial a} = b\), \(\frac{\partial f(a, b)}{\partial b} = a \;\).

+
type Tensor with
+    member a.mul(b) =
+        Tensor.Op
+            { new BinaryOpElementwise("mul") with
+                member _.fRaw(a,b) = a.MulTT(b)
+                member _.dfda(a,b,f) = b
+                member _.dfdb(a,b,f) = a
+            }
+            (a,b)
+
+

\(f(a, b) = a^b\), with derivatives \(\frac{\partial f(a, b)}{\partial a} = b a^{b-1}\), \(\frac{\partial f(a, b)}{\partial b} = a^b \mathrm{log}(a) \;\). Note the use of the argument f in the derivative definitions that makes use of the pre-computed value of \(f(a, b) = a^b\) that is available to the derivative implementation.

+
type Tensor with
+    member a.pow(b) =
+        Tensor.Op
+            { new BinaryOpElementwise("pow") with
+                member _.fRaw(a,b) = a.PowTT(b)
+                member _.dfda(a,b,f) = b * f / a  // equivalent to b * a.pow(b-1)
+                member _.dfdb(a,b,f) = f * a.log()  // equivalent to a.pow(b) * a.log()
+            }
+            (a,b)
+
+

General functions

+

For more complicated functions, you can use the most general way of defining functions using the UnaryOp or BinaryOp types, which allow you to define the full forward and reverse mode differentiation rules. The documentation of these two types detail how they should be instantiated.

+

Let's see several examples.

+

\(f(A) = A^{\intercal}\), with the forward derivative propagation rule \(\frac{\partial f(A)}{\partial X} = \frac{\partial A}{\partial X} \frac{\partial f(A)}{\partial A} = (\frac{\partial A}{\partial X})^{\intercal}\) and the reverse derivative propagation rule \(\frac{\partial Y}{\partial A} = \frac{\partial Y}{\partial f(A)} \frac{\partial f(A)}{\partial A} = (\frac{\partial Y}{\partial f(A)})^{\intercal} \;\).

+
type Tensor with
+    member a.transpose() =
+        Tensor.Op
+            { new UnaryOp("transpose") with
+                member _.fRaw(a) = a.TransposeT2()
+                member _.ad_dfda(a,ad,f) = ad.transpose()
+                member _.fd_dfda(a,f,fd) = fd.transpose()
+            }
+            (a)
+
+

\(f(A, B) = AB\), with the forward derivative propagation rule \(\frac{\partial f(A, B)}{\partial X} = \frac{\partial A}{\partial X} \frac{\partial f(A, B)}{\partial A} + \frac{\partial B}{\partial X} \frac{\partial f(A, B)}{\partial B} = \frac{\partial A}{\partial X} B + A \frac{\partial B}{\partial X}\) and the reverse propagation rule \(\frac{\partial Y}{\partial A} = \frac{\partial Y}{\partial f(A, B)} \frac{\partial f(A, B)}{\partial A} = \frac{\partial Y}{\partial f(A, B)} B^{\intercal}\), \(\frac{\partial Y}{\partial B} = \frac{\partial Y}{\partial f(A, B)} \frac{\partial f(A, B)}{\partial B} = A^{\intercal} \frac{\partial Y}{\partial f(A, B)} \;\).

+
type Tensor with
+    member a.matmul(b) =
+        Tensor.Op
+            { new BinaryOp("matmul") with
+                member _.fRaw(a,b) = a.MatMulTT(b)
+                member _.ad_dfda(a,ad,b,f) = ad.matmul(b)
+                member _.bd_dfdb(a,b,bd,f) = a.matmul(bd)
+                member _.fd_dfda(a,b,f,fd) = fd.matmul(b.transpose())
+                member _.fd_dfdb(a,b,f,fd) = a.transpose().matmul(fd)
+            }
+            (a,b)
+
+ +
namespace Furnace
+
type FurnaceImage = + static member abs: input: Tensor -> Tensor + static member acos: input: Tensor -> Tensor + static member add: a: Tensor * b: Tensor -> Tensor + static member arange: endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member arangeLike: input: Tensor * endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member argmax: input: Tensor -> int[] + 1 overload + static member argmin: input: Tensor -> int[] + 1 overload + static member asin: input: Tensor -> Tensor + static member atan: input: Tensor -> Tensor + static member backends: unit -> Backend list + ...
<summary> + Tensor operations +</summary>
+
static member Furnace.FurnaceImage.config: unit -> Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer
static member Furnace.FurnaceImage.config: configuration: (Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer) -> unit
static member Furnace.FurnaceImage.config: ?device: Furnace.Device * ?dtype: Furnace.Dtype * ?backend: Furnace.Backend * ?printer: Furnace.Printer -> unit
+
Multiple items
module Backend + +from Furnace
<summary> + Contains functions and settings related to backend specifications. +</summary>

--------------------
type Backend = + | Reference + | Torch + | Other of name: string * code: int + override ToString: unit -> string + member Name: string
<summary> + Represents a backend for Furnace tensors +</summary>
+
union case Furnace.Backend.Reference: Furnace.Backend
<summary> + The reference backend +</summary>
+
static member Furnace.FurnaceImage.seed: ?seed: int -> unit
+
type Tensor = + private | TensorC of primalRaw: RawTensor + | TensorF of primal: Tensor * derivative: Tensor * nestingTag: uint32 + | TensorR of primal: Tensor * derivative: Tensor ref * parentOp: TensorOp * fanout: uint32 ref * nestingTag: uint32 + interface IConvertible + interface IComparable + override Equals: other: obj -> bool + override GetHashCode: unit -> int + member GetSlice: bounds: int[,] -> Tensor + override ToString: unit -> string + member abs: unit -> Tensor + member acos: unit -> Tensor + member add: b: Tensor -> Tensor + 1 overload + member addSlice: location: seq<int> * b: Tensor -> Tensor + ...
<summary> + Represents a multi-dimensional data type containing elements of a single data type. + </summary>
<example> + A tensor can be constructed from a list or sequence using <see cref="M:Furnace.FurnaceImage.tensor(System.Object)" /><code> + let t = FurnaceImage.tensor([[1.; -1.]; [1.; -1.]]) + </code></example>
+
val a: Tensor
+
static member Tensor.Op: ext: BinaryOp -> (Tensor * Tensor -> Tensor)
static member Tensor.Op: ext: UnaryOp -> (Tensor -> Tensor)
+
Multiple items
type UnaryOpElementwise = + inherit UnaryOp + new: name: string -> UnaryOpElementwise + override ad_dfda: a: Tensor * ad: Tensor * f: Tensor -> Tensor + abstract dfda: a: Tensor * f: Tensor -> Tensor + override fd_dfda: a: Tensor * f: Tensor * fd: Tensor -> Tensor
<summary>Defines a new op implementing an elementwise unary function and its derivatives. Instances of this class are used with the <see cref="M:Furnace.Tensor.Op(Furnace.UnaryOp)" /> method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation.</summary>
<remarks><para>This type is specialized to elementwise ops. It requires the user to specify only (1) the <see cref="T:Furnace.Backends.RawTensor" /> operation and (2) the derivative of the function with respect to its argument. The corresponding derivative propagation rules for the forward and reverse differentiation modes are automatically generated.</para><para>If you are implementing a complex op that is not elementwise, you can use the generic type <see cref="T:Furnace.UnaryOp" />, which allows you to define the full derivative propagation rules.</para></remarks>
<example><code> + { new UnaryOpElementwise("cos") with + member _.fRaw(a) = a.CosT() + member _.dfda(a,f) = -a.sin() + } + + { new UnaryOpElementwise("exp") with + member _.fRaw(a) = a.ExpT() + member _.dfda(a,f) = f + } + + { new UnaryOpElementwise("log") with + member _.fRaw(a) = a.LogT() + member _.dfda(a,f) = 1/a + } + </code></example>


--------------------
new: name: string -> UnaryOpElementwise
+
val a: Backends.RawTensor
+
abstract Backends.RawTensor.SinT: unit -> Backends.RawTensor
+
val f: Tensor
+
abstract Backends.RawTensor.LogT: unit -> Backends.RawTensor
+
val b: Tensor
+
Multiple items
type BinaryOpElementwise = + inherit BinaryOp + new: name: string -> BinaryOpElementwise + override ad_dfda: a: Tensor * ad: Tensor * b: Tensor * f: Tensor -> Tensor + override bd_dfdb: a: Tensor * b: Tensor * bd: Tensor * f: Tensor -> Tensor + abstract dfda: a: Tensor * b: Tensor * f: Tensor -> Tensor + abstract dfdb: a: Tensor * b: Tensor * f: Tensor -> Tensor + override fd_dfda: a: Tensor * b: Tensor * f: Tensor * fd: Tensor -> Tensor + override fd_dfdb: a: Tensor * b: Tensor * f: Tensor * fd: Tensor -> Tensor
<summary>Defines a new op implementing an elementwise binary function and its derivatives. Instances of this class are used with the <see cref="M:Furnace.Tensor.Op(Furnace.BinaryOp)" /> method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation.</summary>
<remarks> + This type is specialized to elementwise ops. It requires the user to specify only (1) the <see cref="T:Furnace.Backends.RawTensor" /> operation and (2) the derivative of the function with respect to each argument. The corresponding derivative propagation rules for the forward and reverse differentiation modes are automatically generated. + <para>If you are implementing a complex op that is not elementwise, you can use the generic type <see cref="T:Furnace.BinaryOp" />, which allows you to define the full derivative propagation rules.</para></remarks>
<example><code> + { new BinaryOpElementwise("pow") with + member _.fRaw(a,b) = a.PowTT(b) + member _.dfda(a,b,f) = b * f / a + member _.dfdb(a,b,f) = f * a.log() + } + + { new BinaryOpElementwise("mul") with + member _.fRaw(a,b) = a.MulTT(b) + member _.dfda(a,b,f) = b + member _.dfdb(a,b,f) = a + } + </code></example>


--------------------
new: name: string -> BinaryOpElementwise
+
val b: Backends.RawTensor
+
abstract Backends.RawTensor.MulTT: t2: Backends.RawTensor -> Backends.RawTensor
+
abstract Backends.RawTensor.PowTT: t2: Backends.RawTensor -> Backends.RawTensor
+
Multiple items
type UnaryOp = + new: name: string -> UnaryOp + abstract ad_dfda: a: Tensor * ad: Tensor * f: Tensor -> Tensor + abstract fRaw: a: RawTensor -> RawTensor + abstract fd_dfda: a: Tensor * f: Tensor * fd: Tensor -> Tensor + member name: string
<summary>Defines a new op implementing a unary function and its derivatives. Instances of this class are used with the <see cref="M:Furnace.Tensor.Op(Furnace.UnaryOp)" /> method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation.</summary>
<remarks><para>This type represents the most generic definition of a new op representing a unary function, allowing the specification of: (1) the <see cref="T:Furnace.Backends.RawTensor" /> operation, (2) the derivative propagation rule for the forward differentiation mode and (3) the derivative propagation rule for the reverse differentiation mode.</para><para>In general, if you are implementing a simple elementwise op, you should prefer using the <see cref="T:Furnace.UnaryOpElementwise" /> type, which is much simpler to use.</para></remarks>
<example><code> + { new UnaryOp("transpose") with + member _.fRaw(a) = a.TransposeT2() + member _.ad_dfda(a,ad,f) = ad.transpose() + member _.fd_dfda(a,f,fd) = fd.transpose() + } + </code></example>


--------------------
new: name: string -> UnaryOp
+
abstract Backends.RawTensor.TransposeT2: unit -> Backends.RawTensor
+
val ad: Tensor
+
val fd: Tensor
+
Multiple items
type BinaryOp = + new: name: string -> BinaryOp + abstract ad_dfda: a: Tensor * ad: Tensor * b: Tensor * f: Tensor -> Tensor + abstract bd_dfdb: a: Tensor * b: Tensor * bd: Tensor * f: Tensor -> Tensor + abstract fRaw: a: RawTensor * b: RawTensor -> RawTensor + abstract fd_dfda: a: Tensor * b: Tensor * f: Tensor * fd: Tensor -> Tensor + abstract fd_dfdb: a: Tensor * b: Tensor * f: Tensor * fd: Tensor -> Tensor + member name: string
<summary>Defines a new op implementing a binary function and its derivatives. Instances of this class are used with the <see cref="M:Furnace.Tensor.Op(Furnace.BinaryOp)" /> method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation.</summary>
<remarks><para>This type represents the most generic definition of a new op representing a binary function, allowing the specification of: (1) the <see cref="T:Furnace.Backends.RawTensor" /> operation, (2) the derivative propagation rule for the forward differentiation mode and (3) the derivative propagation rule for the reverse differentiation mode.</para><para>In general, if you are implementing a simple elementwise op, you should prefer using the <see cref="T:Furnace.BinaryOpElementwise" /> type, which is much simpler to use.</para></remarks>
<example><code> + { new BinaryOp("matmul") with + member _.fRaw(a,b) = a.MatMulTT(b) + member _.ad_dfda(a,ad,b,f) = ad.matmul(b) + member _.bd_dfdb(a,b,bd,f) = a.matmul(bd) + member _.fd_dfda(a,b,f,fd) = fd.matmul(b.transpose()) + member _.fd_dfdb(a,b,f,fd) = a.transpose().matmul(fd) + } + </code></example>


--------------------
new: name: string -> BinaryOp
+
abstract Backends.RawTensor.MatMulTT: t2: Backends.RawTensor -> Backends.RawTensor
+
val bd: Tensor
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/extensions.ipynb b/extensions.ipynb new file mode 100644 index 00000000..45adda75 --- /dev/null +++ b/extensions.ipynb @@ -0,0 +1,202 @@ + + { + "cells": [ + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n", +"// !bash \u003c(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Import Furnace package\n", +"#r \"nuget: Furnace-lite,1.0.8\"\n", +"\n", +"// Set dotnet interactive formatter to plaintext\n", +"Formatter.SetPreferredMimeTypesFor(typeof\u003cobj\u003e, \"text/plain\")\n", +"Formatter.Register(fun (x:obj) (writer: TextWriter) -\u003e fprintfn writer \"%120A\" x )\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/extensions.ipynb)\u0026emsp;\n", +"[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath=extensions.ipynb)\u0026emsp;\n", +"[![Script](img/badge-script.svg)](extensions.fsx)\u0026emsp;\n", +"[![Script](img/badge-notebook.svg)](extensions.ipynb)\n", +"\n", +"# Extending Furnace\n", +"\n", +"Furnace provides most of the essential operations found in tensor libraries such as [NumPy](https://numpy.org/), [PyTorch](https://pytorch.org/), and [TensorFlow](https://www.tensorflow.org/). 
All differentiable operations support the forward, reverse, and nested differentiation modes.\n", +"\n", +"When implementing new operations, you should prefer to implement these as compositions of existing Furnace [Tensor](https://fsprojects.github.io/Furnace/reference/furnace-tensor.html) operations, which would give you differentiability out of the box.\n", +"\n", +"In the rare cases where you need to extend Furnace with a completely new differentiable operation that cannot be implemented as a composition of existing operations, you can use the provided extension API.\n", +"\n", +"## Simple elementwise functions\n", +"\n", +"If the function you would like to implement is a simple elementwise function, you can use the [UnaryOpElementwise](https://fsprojects.github.io/Furnace/reference/furnace-unaryopelementwise.html) or [BinaryOpElementwise](https://fsprojects.github.io/Furnace/reference/furnace-binaryopelementwise.html) types to define your function and its derivatives. The forward, reverse, and nested differentiation rules for the function are automatically generated by the type. 
The documentation of these two types detail how they should be instantiated.\n", +"\n", +"Let\u0027s see several examples.\n", +"\n", +"$f(a) = \\mathrm{sin}(a)$, with derivative $\\frac{\\partial f(a)}{\\partial a} = \\mathrm{cos}(a) \\;$.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 2, "outputs": [], + "source": ["open Furnace\n", +"\n", +"type Tensor with\n", +" member a.sin() = \n", +" Tensor.Op\n", +" { new UnaryOpElementwise(\"sin\") with \n", +" member _.fRaw(a) = a.SinT()\n", +" member _.dfda(a,f) = a.cos()\n", +" }\n", +" (a)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["$f(a) = \\mathrm{log}(a)$, with derivative $\\frac{\\partial f(a)}{\\partial a} = 1/a \\;$.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 3, "outputs": [], + "source": ["type Tensor with\n", +" member a.log() =\n", +" Tensor.Op\n", +" { new UnaryOpElementwise(\"log\") with\n", +" member _.fRaw(a) = a.LogT()\n", +" member _.dfda(a,f) = 1/a\n", +" }\n", +" (a)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["$f(a, b) = ab$, with derivatives $\\frac{\\partial f(a, b)}{\\partial a} = b$, $\\frac{\\partial f(a, b)}{\\partial b} = a \\;$.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 4, "outputs": [], + "source": ["type Tensor with\n", +" member a.mul(b) =\n", +" Tensor.Op\n", +" { new BinaryOpElementwise(\"mul\") with\n", +" member _.fRaw(a,b) = a.MulTT(b)\n", +" member _.dfda(a,b,f) = b\n", +" member _.dfdb(a,b,f) = a\n", +" }\n", +" (a,b)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["$f(a, b) = a^b$, with derivatives $\\frac{\\partial f(a, b)}{\\partial a} = b a^{b-1}$, $\\frac{\\partial f(a, b)}{\\partial b} = a^b \\mathrm{log}(a) \\;$. 
Note the use of the argument `f` in the derivative definitions that makes use of the pre-computed value of $f(a, b) = a^b$ that is available to the derivative implementation.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 5, "outputs": [], + "source": ["type Tensor with\n", +" member a.pow(b) =\n", +" Tensor.Op\n", +" { new BinaryOpElementwise(\"pow\") with\n", +" member _.fRaw(a,b) = a.PowTT(b)\n", +" member _.dfda(a,b,f) = b * f / a // equivalent to b * a.pow(b-1)\n", +" member _.dfdb(a,b,f) = f * a.log() // equivalent to a.pow(b) * a.log()\n", +" }\n", +" (a,b)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["## General functions\n", +"\n", +"For more complicated functions, you can use the most general way of defining functions using the [UnaryOp](https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html) or [BinaryOp](https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html) types, which allow you to define the full forward and reverse mode differentiation rules. 
The documentation of these two types detail how they should be instantiated.\n", +"\n", +"Let\u0027s see several examples.\n", +"\n", +"$f(A) = A^{\\intercal}$, with the forward derivative propagation rule $\\frac{\\partial f(A)}{\\partial X} = \\frac{\\partial A}{\\partial X} \\frac{\\partial f(A)}{\\partial A} = (\\frac{\\partial A}{\\partial X})^{\\intercal}$ and the reverse derivative propagation rule $\\frac{\\partial Y}{\\partial A} = \\frac{\\partial Y}{\\partial f(A)} \\frac{\\partial f(A)}{\\partial A} = (\\frac{\\partial Y}{\\partial f(A)})^{\\intercal} \\;$.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 6, "outputs": [], + "source": ["type Tensor with\n", +" member a.transpose() =\n", +" Tensor.Op\n", +" { new UnaryOp(\"transpose\") with\n", +" member _.fRaw(a) = a.TransposeT2()\n", +" member _.ad_dfda(a,ad,f) = ad.transpose()\n", +" member _.fd_dfda(a,f,fd) = fd.transpose()\n", +" }\n", +" (a)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["$f(A, B) = AB$, with the forward derivative propagation rule $\\frac{\\partial(A, B)}{\\partial X} = \\frac{\\partial A}{\\partial X} \\frac{\\partial f(A, B)}{\\partial A} + \\frac{\\partial B}{\\partial X} \\frac{\\partial f(A, B)}{\\partial B} = \\frac{\\partial A}{\\partial X} B + A \\frac{\\partial B}{\\partial X}$ and the reverse propagation rule $\\frac{\\partial Y}{\\partial A} = \\frac{\\partial Y}{\\partial f(A, B)} \\frac{\\partial f(A, B)}{\\partial A} = \\frac{\\partial Y}{\\partial f(A, B)} B^{\\intercal}$, $\\frac{\\partial Y}{\\partial B} = \\frac{\\partial Y}{\\partial f(A, B)} \\frac{\\partial f(A, B)}{B} = A^{\\intercal} \\frac{\\partial Y}{\\partial f(A, B)} \\;$.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 7, "outputs": [], + "source": ["type Tensor with\n", +" member a.matmul(b) =\n", +" Tensor.Op\n", +" { new BinaryOp(\"matmul\") with\n", +" member _.fRaw(a,b) = a.MatMulTT(b)\n", +" member 
_.ad_dfda(a,ad,b,f) = ad.matmul(b)\n", +" member _.bd_dfdb(a,b,bd,f) = a.matmul(bd)\n", +" member _.fd_dfda(a,b,f,fd) = fd.matmul(b.transpose())\n", +" member _.fd_dfdb(a,b,f,fd) = a.transpose().matmul(fd)\n", +" }\n", +" (a,b)\n"] + }], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/favicon.ico b/favicon.ico new file mode 100644 index 00000000..f9d76aed Binary files /dev/null and b/favicon.ico differ diff --git a/img/Furnace-logo-text.png b/img/Furnace-logo-text.png new file mode 100644 index 00000000..7eca43bd Binary files /dev/null and b/img/Furnace-logo-text.png differ diff --git a/img/Furnace-logo.png b/img/Furnace-logo.png new file mode 100644 index 00000000..3b3ef9a6 Binary files /dev/null and b/img/Furnace-logo.png differ diff --git a/img/anim-intro-1.gif b/img/anim-intro-1.gif new file mode 100644 index 00000000..c9dd2166 Binary files /dev/null and b/img/anim-intro-1.gif differ diff --git a/img/anim-intro-2.gif b/img/anim-intro-2.gif new file mode 100644 index 00000000..c3cec8e4 Binary files /dev/null and b/img/anim-intro-2.gif differ diff --git a/img/badge-binder.svg b/img/badge-binder.svg new file mode 100644 index 00000000..8df9f49a --- /dev/null +++ b/img/badge-binder.svg @@ -0,0 +1 @@ +Run in BinderRun in Binder \ No newline at end of file diff --git a/img/badge-notebook.svg b/img/badge-notebook.svg new file mode 100644 index 00000000..a001b544 --- /dev/null +++ b/img/badge-notebook.svg @@ -0,0 +1 @@ +Download notebookDownload notebook \ No newline at end of file diff --git a/img/badge-script.svg b/img/badge-script.svg new file mode 100644 index 00000000..90c93ebe --- /dev/null +++ b/img/badge-script.svg @@ -0,0 +1 @@ +Download scriptDownload script \ No newline at end of file diff 
--git a/index.fsx b/index.fsx new file mode 100644 index 00000000..030b5293 --- /dev/null +++ b/index.fsx @@ -0,0 +1,166 @@ +#r "nuget: Furnace-lite,1.0.8" +(** +[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/gh-pages/index.ipynb)  +[![Script](img/badge-script.svg)](index.fsx)  +[![Script](img/badge-notebook.svg)](index.ipynb) + +# Furnace: Differentiable Tensor Programming Made Simple + +Furnace is a tensor library with support for [differentiable programming](https://en.wikipedia.org/wiki/Differentiable_programming). +It is designed for use in machine learning, probabilistic programming, optimization and other domains. + +*) + +(** +## Key Features + +🗹 Nested and mixed-mode differentiation + +🗹 Common optimizers, model elements, differentiable probability distributions + +🗹 F# for robust functional programming + +🗹 PyTorch familiar naming and idioms, efficient LibTorch CUDA/C++ tensors with GPU support + +🗹 Linux, macOS, Windows supported + +🗹 Use interactive notebooks in Jupyter and Visual Studio Code + +🗹 100% open source + +## Differentiable Programming + +Furnace provides world-leading automatic differentiation capabilities for tensor code, including composable gradients, Hessians, Jacobians, directional derivatives, and matrix-free Hessian- and Jacobian-vector products over arbitrary user code. This goes beyond conventional tensor libraries such as PyTorch and TensorFlow, allowing the use of nested forward and reverse differentiation up to any level. + +With Furnace, you can compute higher-order derivatives efficiently and differentiate functions that are internally making use of differentiation and gradient-based optimization. + +*) +
+ +(** +## Practical, Familiar and Efficient + +Furnace comes with a [LibTorch](https://pytorch.org/cppdocs/) backend, using the same C++ and CUDA implementations for tensor computations that power [PyTorch](https://pytorch.org/). On top of these raw tensors (LibTorch's ATen, excluding autograd), Furnace implements its own computation graph and differentiation capabilities. It is tested on Linux, macOS, and Windows, and it supports CUDA and GPUs. + +The Furnace API is designed to be similar to [the PyTorch Python API](https://pytorch.org/docs/stable/index.html) through very similar naming and idioms, and where elements have similar names the PyTorch documentation can generally be used as a guide. + +Furnace uses [the incredible F# programming language](https://dot.net/fsharp) for tensor programming. F# code is generally faster and more robust than equivalent Python code, while still being succinct and compact like Python, making it an ideal modern AI and machine learning implementation language. This allows fluent and productive code for tensor programming. + +*) +
+ +(** +## Interactive Notebooks + +All documentation pages in this website are interactive notebooks which you can execute directly in your browser without installing anything in your local machine. + +Using the [![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/gh-pages/index.ipynb) on the top of each page, you can execute the page as an interactive notebook running on cloud servers provided by [Google Colab](https://colab.research.google.com/). + +Using the buttons [![Script](img/badge-script.svg)](index.fsx) +[![Script](img/badge-notebook.svg)](index.ipynb) you can also download a page as a script or an interactive notebook, which you can execute locally in [Jupyter](https://jupyter.org/) or [Visual Studio Code](https://code.visualstudio.com/) using [dotnet interactive](https://github.com/dotnet/interactive). + +## Example + +Define and add two tensors: + +*) +open Furnace + +let t1 = FurnaceImage.tensor [ 0.0 ..0.2.. 1.0 ] // Gives [0., 0.2, 0.4, 0.6, 0.8, 1.] +let t2 = FurnaceImage.tensor [ 1, 2, 3, 4, 5, 6 ] + +t1 + t2(* output: +No value returned by any evaluator*) +(** +Compute a convolution: + +*) +let t3 = FurnaceImage.tensor [[[[0.0 .. 10.0]]]] +let t4 = FurnaceImage.tensor [[[[0.0 ..0.1.. 1.0]]]] + +t3.conv2d(t4)(* output: +No value returned by any evaluator*) +(** +Take the gradient of a vector-to-scalar function: + +*) +let f (x: Tensor) = x.exp().sum() + +FurnaceImage.grad f (FurnaceImage.tensor([1.8, 2.5]))(* output: +No value returned by any evaluator*) +(** +Compute a nested derivative (checking for [perturbation confusion](https://doi.org/10.1007/s10990-008-9037-1)): + +*) +let x0 = FurnaceImage.tensor(1.) +let y0 = FurnaceImage.tensor(2.) 
+FurnaceImage.diff (fun x -> x * FurnaceImage.diff (fun y -> x * y) y0) x0(* output: +No value returned by any evaluator*) +(** +Define a model and optimize it: + +*) +open Furnace +open Furnace.Data +open Furnace.Model +open Furnace.Compose +open Furnace.Util +open Furnace.Optim + +let epochs = 2 +let batchSize = 32 +let numBatches = 5 + +let trainSet = MNIST("../data", train=true, transform=id) +let trainLoader = trainSet.loader(batchSize=batchSize, shuffle=true) + +let validSet = MNIST("../data", train=false, transform=id) +let validLoader = validSet.loader(batchSize=batchSize, shuffle=false) + +let encoder = + Conv2d(1, 32, 4, 2) + --> FurnaceImage.relu + --> Conv2d(32, 64, 4, 2) + --> FurnaceImage.relu + --> Conv2d(64, 128, 4, 2) + --> FurnaceImage.flatten(1) + +let decoder = + FurnaceImage.unflatten(1, [128;1;1]) + --> ConvTranspose2d(128, 64, 4, 2) + --> FurnaceImage.relu + --> ConvTranspose2d(64, 32, 4, 3) + --> FurnaceImage.relu + --> ConvTranspose2d(32, 1, 4, 2) + --> FurnaceImage.sigmoid + +let model = VAE([1;28;28], 64, encoder, decoder) + +let lr = FurnaceImage.tensor(0.001) +let optimizer = Adam(model, lr=lr) + +for epoch = 1 to epochs do + let batches = trainLoader.epoch(numBatches) + for i, x, _ in batches do + model.reverseDiff() + let l = model.loss(x) + l.reverse() + optimizer.step() + print $"Epoch: {epoch} minibatch: {i} loss: {l}" + +let validLoss = + validLoader.epoch() + |> Seq.sumBy (fun (_, x, _) -> model.loss(x, normalize=false)) +print $"Validation loss: {validLoss/validSet.length}" +(** +Numerous other model definition, differentiation, and training patterns are supported. See the tutorials in the left-hand menu and [examples](https://github.com/fsprojects/Furnace/tree/dev/examples) on GitHub. 
+ +## More Information + +Furnace is developed by [Atılım Güneş Baydin](http://www.robots.ox.ac.uk/~gunes/), [Don Syme](https://www.microsoft.com/en-us/research/people/dsyme/) +and other contributors, having started as a project supervised by the automatic differentiation wizards [Barak Pearlmutter](https://scholar.google.com/citations?user=AxFrw0sAAAAJ&hl=en) and [Jeffrey Siskind](https://scholar.google.com/citations?user=CgSBtPYAAAAJ&hl=en). + +Please join us [on GitHub](https://github.com/fsprojects/Furnace)! + +*) + diff --git a/index.html b/index.html new file mode 100644 index 00000000..be11e842 --- /dev/null +++ b/index.html @@ -0,0 +1,400 @@ + + + + + Furnace: Differentiable Tensor Programming Made Simple + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+

Binder  +Script  +Script

+

Furnace: Differentiable Tensor Programming Made Simple

+

Furnace is a tensor library with support for differentiable programming. +It is designed for use in machine learning, probabilistic programming, optimization and other domains.

+ +

Key Features

+

🗹 Nested and mixed-mode differentiation

+

🗹 Common optimizers, model elements, differentiable probability distributions

+

🗹 F# for robust functional programming

+

🗹 PyTorch familiar naming and idioms, efficient LibTorch CUDA/C++ tensors with GPU support

+

🗹 Linux, macOS, Windows supported

+

🗹 Use interactive notebooks in Jupyter and Visual Studio Code

+

🗹 100% open source

+

Differentiable Programming

+

Furnace provides world-leading automatic differentiation capabilities for tensor code, including composable gradients, Hessians, Jacobians, directional derivatives, and matrix-free Hessian- and Jacobian-vector products over arbitrary user code. This goes beyond conventional tensor libraries such as PyTorch and TensorFlow, allowing the use of nested forward and reverse differentiation up to any level.

+

With Furnace, you can compute higher-order derivatives efficiently and differentiate functions that are internally making use of differentiation and gradient-based optimization.

+
+ +

Practical, Familiar and Efficient

+

Furnace comes with a LibTorch backend, using the same C++ and CUDA implementations for tensor computations that power PyTorch. On top of these raw tensors (LibTorch's ATen, excluding autograd), Furnace implements its own computation graph and differentiation capabilities. It is tested on Linux, macOS, and Windows, and it supports CUDA and GPUs.

+

The Furnace API is designed to be similar to the PyTorch Python API through very similar naming and idioms, and where elements have similar names the PyTorch documentation can generally be used as a guide.

+

Furnace uses the incredible F# programming language for tensor programming. F# code is generally faster and more robust than equivalent Python code, while still being succinct and compact like Python, making it an ideal modern AI and machine learning implementation language. This allows fluent and productive code for tensor programming.

+
+ +

Interactive Notebooks

+

All documentation pages in this website are interactive notebooks which you can execute directly in your browser without installing anything in your local machine.

+

Using the Binder on the top of each page, you can execute the page as an interactive notebook running on cloud servers provided by Google Colab.

+

Using the buttons Script +Script you can also download a page as a script or an interactive notebook, which you can execute locally in Jupyter or Visual Studio Code using dotnet interactive.

+

Example

+

Define and add two tensors:

+
open Furnace
+
+let t1 = FurnaceImage.tensor [ 0.0 ..0.2.. 1.0 ] // Gives [0., 0.2, 0.4, 0.6, 0.8, 1.]
+let t2 = FurnaceImage.tensor [ 1, 2, 3, 4, 5, 6 ]
+
+t1 + t2
+
+
No value returned by any evaluator
+

Compute a convolution:

+
let t3 = FurnaceImage.tensor [[[[0.0 .. 10.0]]]]
+let t4 = FurnaceImage.tensor [[[[0.0 ..0.1.. 1.0]]]]
+
+t3.conv2d(t4)
+
+
No value returned by any evaluator
+

Take the gradient of a vector-to-scalar function:

+
let f (x: Tensor) = x.exp().sum()
+
+FurnaceImage.grad f (FurnaceImage.tensor([1.8, 2.5]))
+
+
No value returned by any evaluator
+

Compute a nested derivative (checking for perturbation confusion):

+
let x0 = FurnaceImage.tensor(1.)
+let y0 = FurnaceImage.tensor(2.)
+FurnaceImage.diff (fun x -> x * FurnaceImage.diff (fun y -> x * y) y0) x0
+
+
No value returned by any evaluator
+

Define a model and optimize it:

+
open Furnace
+open Furnace.Data
+open Furnace.Model
+open Furnace.Compose
+open Furnace.Util
+open Furnace.Optim
+
+let epochs = 2
+let batchSize = 32
+let numBatches = 5
+
+let trainSet = MNIST("../data", train=true, transform=id)
+let trainLoader = trainSet.loader(batchSize=batchSize, shuffle=true)
+
+let validSet = MNIST("../data", train=false, transform=id)
+let validLoader = validSet.loader(batchSize=batchSize, shuffle=false)
+
+let encoder =
+    Conv2d(1, 32, 4, 2)
+    --> FurnaceImage.relu
+    --> Conv2d(32, 64, 4, 2)
+    --> FurnaceImage.relu
+    --> Conv2d(64, 128, 4, 2)
+    --> FurnaceImage.flatten(1)
+
+let decoder =
+    FurnaceImage.unflatten(1, [128;1;1])
+    --> ConvTranspose2d(128, 64, 4, 2)
+    --> FurnaceImage.relu
+    --> ConvTranspose2d(64, 32, 4, 3)
+    --> FurnaceImage.relu
+    --> ConvTranspose2d(32, 1, 4, 2)
+    --> FurnaceImage.sigmoid
+
+let model = VAE([1;28;28], 64, encoder, decoder)
+
+let lr = FurnaceImage.tensor(0.001)
+let optimizer = Adam(model, lr=lr)
+
+for epoch = 1 to epochs do
+    let batches = trainLoader.epoch(numBatches)
+    for i, x, _ in batches do
+        model.reverseDiff()
+        let l = model.loss(x)
+        l.reverse()
+        optimizer.step()
+        print $"Epoch: {epoch} minibatch: {i} loss: {l}" 
+
+let validLoss = 
+    validLoader.epoch() 
+    |> Seq.sumBy (fun (_, x, _) -> model.loss(x, normalize=false))
+print $"Validation loss: {validLoss/validSet.length}"
+
+

Numerous other model definition, differentiation, and training patterns are supported. See the tutorials in the left-hand menu and examples on GitHub.

+

More Information

+

Furnace is developed by Atılım Güneş Baydin, Don Syme +and other contributors, having started as a project supervised by the automatic differentiation wizards Barak Pearlmutter and Jeffrey Siskind.

+

Please join us on GitHub!

+ +
namespace Furnace
+
type FurnaceImage = + static member abs: input: Tensor -> Tensor + static member acos: input: Tensor -> Tensor + static member add: a: Tensor * b: Tensor -> Tensor + static member arange: endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member arangeLike: input: Tensor * endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member argmax: input: Tensor -> int[] + 1 overload + static member argmin: input: Tensor -> int[] + 1 overload + static member asin: input: Tensor -> Tensor + static member atan: input: Tensor -> Tensor + static member backends: unit -> Backend list + ...
<summary> + Tensor operations +</summary>
+
static member Furnace.FurnaceImage.config: unit -> Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer
static member Furnace.FurnaceImage.config: configuration: (Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer) -> unit
static member Furnace.FurnaceImage.config: ?device: Furnace.Device * ?dtype: Furnace.Dtype * ?backend: Furnace.Backend * ?printer: Furnace.Printer -> unit
+
Multiple items
module Backend + +from Furnace
<summary> + Contains functions and settings related to backend specifications. +</summary>

--------------------
type Backend = + | Reference + | Torch + | Other of name: string * code: int + override ToString: unit -> string + member Name: string
<summary> + Represents a backend for Furnace tensors +</summary>
+
union case Furnace.Backend.Reference: Furnace.Backend
<summary> + The reference backend +</summary>
+
static member Furnace.FurnaceImage.seed: ?seed: int -> unit
+
val t1: Tensor
+
static member FurnaceImage.tensor: value: obj * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor
+
val t2: Tensor
+
val t3: Tensor
+
val t4: Tensor
+
val f: x: Tensor -> Tensor
+
val x: Tensor
+
type Tensor = + private | TensorC of primalRaw: RawTensor + | TensorF of primal: Tensor * derivative: Tensor * nestingTag: uint32 + | TensorR of primal: Tensor * derivative: Tensor ref * parentOp: TensorOp * fanout: uint32 ref * nestingTag: uint32 + interface IConvertible + interface IComparable + override Equals: other: obj -> bool + override GetHashCode: unit -> int + member GetSlice: bounds: int[,] -> Tensor + override ToString: unit -> string + member abs: unit -> Tensor + member acos: unit -> Tensor + member add: b: Tensor -> Tensor + 1 overload + member addSlice: location: seq<int> * b: Tensor -> Tensor + ...
<summary> + Represents a multi-dimensional data type containing elements of a single data type. + </summary>
<example> + A tensor can be constructed from a list or sequence using <see cref="M:Furnace.FurnaceImage.tensor(System.Object)" /><code> + let t = FurnaceImage.tensor([[1.; -1.]; [1.; -1.]]) + </code></example>
+
static member FurnaceImage.grad: f: (Tensor -> Tensor) -> x: Tensor -> Tensor
+
val x0: Tensor
+
val y0: Tensor
+
static member FurnaceImage.diff: f: (Tensor -> Tensor) -> x: Tensor -> Tensor
+
val y: Tensor
+
namespace Furnace.Data
+
namespace Furnace.Model
+
module Compose + +from Furnace
+
namespace Furnace.Util
+
namespace Furnace.Optim
+
val epochs: int
+
val batchSize: int
+
val numBatches: int
+
val trainSet: MNIST
+
Multiple items
type MNIST = + inherit Dataset + new: path: string * ?urls: seq<string> * ?train: bool * ?transform: (Tensor -> Tensor) * ?targetTransform: (Tensor -> Tensor) * ?n: int -> MNIST + override item: i: int -> Tensor * Tensor + member classNames: string[] + member classes: int + override length: int

--------------------
new: path: string * ?urls: seq<string> * ?train: bool * ?transform: (Tensor -> Tensor) * ?targetTransform: (Tensor -> Tensor) * ?n: int -> MNIST
+
val id: x: 'T -> 'T
<summary>The identity function</summary>
<param name="x">The input value.</param>
<returns>The same value.</returns>
<example id="id-example"><code lang="fsharp"> + id 12 // Evaluates to 12 + id "abc" // Evaluates to "abc" + </code></example>
+
val trainLoader: DataLoader
+
member Dataset.loader: batchSize: int * ?shuffle: bool * ?dropLast: bool * ?device: Device * ?dtype: Dtype * ?backend: Backend * ?targetDevice: Device * ?targetDtype: Dtype * ?targetBackend: Backend -> DataLoader
+
val validSet: MNIST
+
val validLoader: DataLoader
+
val encoder: Model<Tensor,Tensor>
+
Multiple items
type Conv2d = + inherit Model + new: inChannels: int * outChannels: int * ?kernelSize: int * ?stride: int * ?padding: int * ?dilation: int * ?kernelSizes: seq<int> * ?strides: seq<int> * ?paddings: seq<int> * ?dilations: seq<int> * ?bias: bool -> Conv2d + override ToString: unit -> string + override forward: value: Tensor -> Tensor + member bias: Tensor + member weight: Tensor
<summary>A model that applies a 2D convolution over an input signal composed of several input planes</summary>

--------------------
new: inChannels: int * outChannels: int * ?kernelSize: int * ?stride: int * ?padding: int * ?dilation: int * ?kernelSizes: seq<int> * ?strides: seq<int> * ?paddings: seq<int> * ?dilations: seq<int> * ?bias: bool -> Conv2d
+
static member FurnaceImage.relu: input: Tensor -> Tensor
+
static member FurnaceImage.flatten: startDim: int * ?endDim: int -> (Tensor -> Tensor)
static member FurnaceImage.flatten: input: Tensor * ?startDim: int * ?endDim: int -> Tensor
+
val decoder: Model<Tensor,Tensor>
+
static member FurnaceImage.unflatten: dim: int * unflattenedShape: seq<int> -> (Tensor -> Tensor)
static member FurnaceImage.unflatten: input: Tensor * dim: int * unflattenedShape: seq<int> -> Tensor
+
Multiple items
type ConvTranspose2d = + inherit Model + new: inChannels: int * outChannels: int * ?kernelSize: int * ?stride: int * ?padding: int * ?dilation: int * ?kernelSizes: seq<int> * ?strides: seq<int> * ?paddings: seq<int> * ?dilations: seq<int> * ?bias: bool -> ConvTranspose2d + override ToString: unit -> string + override forward: value: Tensor -> Tensor + member bias: Tensor + member weight: Tensor
<summary>A model that applies a 2D transposed convolution operator over an input image composed of several input planes.</summary>

--------------------
new: inChannels: int * outChannels: int * ?kernelSize: int * ?stride: int * ?padding: int * ?dilation: int * ?kernelSizes: seq<int> * ?strides: seq<int> * ?paddings: seq<int> * ?dilations: seq<int> * ?bias: bool -> ConvTranspose2d
+
static member FurnaceImage.sigmoid: input: Tensor -> Tensor
+
val model: VAE
+
Multiple items
type VAE = + inherit VAEBase + new: xShape: seq<int> * zDim: int * encoder: Model * decoder: Model -> VAE + override ToString: unit -> string + override decode: z: Tensor -> Tensor + override encode: x: Tensor -> Tensor * Tensor
<summary>Variational auto-encoder</summary>

--------------------
new: xShape: seq<int> * zDim: int * encoder: Model * decoder: Model -> VAE
+
val lr: Tensor
+
static member FurnaceImage.tensor: ?device: Device * ?dtype: Dtype * ?backend: Backend -> ('a -> Tensor)
static member FurnaceImage.tensor: value: obj * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor
+
val optimizer: Adam
+
Multiple items
type Adam = + inherit Optimizer + new: model: Model * ?lr: Tensor * ?beta1: Tensor * ?beta2: Tensor * ?eps: Tensor * ?weightDecay: Tensor * ?reversible: bool -> Adam + override updateRule: name: string -> t: Tensor -> Tensor
<summary>TBD</summary>

--------------------
new: model: Model * ?lr: Tensor * ?beta1: Tensor * ?beta2: Tensor * ?eps: Tensor * ?weightDecay: Tensor * ?reversible: bool -> Adam
+
val epoch: int
+
val batches: seq<int * Tensor * Tensor>
+
member DataLoader.epoch: ?numBatches: int -> seq<int * Tensor * Tensor>
+
val i: int
+
member ModelBase.reverseDiff: ?nestingTag: uint32 -> unit
+
val l: Tensor
+
member VAEBase.loss: x: Tensor * ?normalize: bool -> Tensor
+
member Optimizer.step: unit -> unit
+
val print: x: 'a -> unit
<summary> + Print the given value to the console using the '%A' printf format specifier +</summary>
+
val validLoss: Tensor
+
Multiple items
module Seq + +from Furnace.Util
<summary> + Contains extensions to the F# Seq module. +</summary>

--------------------
module Seq

from Microsoft.FSharp.Collections
<summary>Contains operations for working with values of type <see cref="T:Microsoft.FSharp.Collections.seq`1" />.</summary>
+
val sumBy: projection: ('T -> 'U) -> source: seq<'T> -> 'U (requires member (+) and member get_Zero)
<summary>Returns the sum of the results generated by applying the function to each element of the sequence.</summary>
<remarks>The generated elements are summed using the <c>+</c> operator and <c>Zero</c> property associated with the generated type.</remarks>
<param name="projection">A function to transform items from the input sequence into the type that will be summed.</param>
<param name="source">The input sequence.</param>
<returns>The computed sum.</returns>
<example id="sumby-1"><code lang="fsharp"> + let input = [ "aa"; "bbb"; "cc" ] + + input |&gt; Seq.sumBy (fun s -&gt; s.Length) + </code> + Evaluates to <c>7</c>. + </example>
+
property MNIST.length: int with get
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/index.ipynb b/index.ipynb new file mode 100644 index 00000000..9c985a06 --- /dev/null +++ b/index.ipynb @@ -0,0 +1,313 @@ + + { + "cells": [ + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n", +"// !bash \u003c(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Import Furnace package\n", +"#r \"nuget: Furnace-lite,1.0.8\"\n", +"\n", +"// Set dotnet interactive formatter to plaintext\n", +"Formatter.SetPreferredMimeTypesFor(typeof\u003cobj\u003e, \"text/plain\")\n", +"Formatter.Register(fun (x:obj) (writer: TextWriter) -\u003e fprintfn writer \"%120A\" x )\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/gh-pages/index.ipynb)\u0026emsp;\n", +"[![Script](img/badge-script.svg)](index.fsx)\u0026emsp;\n", +"[![Script](img/badge-notebook.svg)](index.ipynb)\n", +"\n", +"# Furnace: Differentiable Tensor Programming Made Simple\n", +"\n", +"Furnace is a tensor library with support for [differentiable programming](https://en.wikipedia.org/wiki/Differentiable_programming).\n", +"It is designed for use in machine learning, probabilistic programming, optimization and other domains.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["\u003cbutton class=\"button\" style=\"vertical-align:middle\" onclick=\"window.location.href=\u0027https://fsprojects.github.io/Furnace/install.html\u0027\"\u003e\u003cspan\u003eInstall »\u003c/span\u003e\u003c/button\u003e\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, 
+ + "source": ["## Key Features\n", +"\n", +"🗹 Nested and mixed-mode differentiation\n", +"\n", +"🗹 Common optimizers, model elements, differentiable probability distributions\n", +"\n", +"🗹 F# for robust functional programming\n", +"\n", +"🗹 PyTorch familiar naming and idioms, efficient LibTorch CUDA/C++ tensors with GPU support\n", +"\n", +"🗹 Linux, macOS, Windows supported\n", +"\n", +"🗹 Use interactive notebooks in Jupyter and Visual Studio Code\n", +"\n", +"🗹 100% open source\n", +"\n", +"## Differentiable Programming\n", +"\n", +"Furnace provides world-leading automatic differentiation capabilities for tensor code, including composable gradients, Hessians, Jacobians, directional derivatives, and matrix-free Hessian- and Jacobian-vector products over arbitrary user code. This goes beyond conventional tensor libraries such as PyTorch and TensorFlow, allowing the use of nested forward and reverse differentiation up to any level.\n", +"\n", +"With Furnace, you can compute higher-order derivatives efficiently and differentiate functions that are internally making use of differentiation and gradient-based optimization.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["\u003c/br\u003e\n", +"\u003cimg src=\"img/anim-intro-2.gif\" width=\"85%\" /\u003e\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["## Practical, Familiar and Efficient\n", +"\n", +"Furnace comes with a [LibTorch](https://pytorch.org/cppdocs/) backend, using the same C++ and CUDA implementations for tensor computations that power [PyTorch](https://pytorch.org/). On top of these raw tensors (LibTorch\u0027s ATen, excluding autograd), Furnace implements its own computation graph and differentiation capabilities. 
It is tested on Linux, macOS, and Windows, and it supports CUDA and GPUs.\n", +"\n", +"The Furnace API is designed to be similar to [the PyTorch Python API](https://pytorch.org/docs/stable/index.html) through very similar naming and idioms, and where elements have similar names the PyTorch documentation can generally be used as a guide.\n", +"\n", +"Furnace uses [the incredible F# programming language](https://dot.net/fsharp) for tensor programming. F# code is generally faster and more robust than equivalent Python code, while still being succinct and compact like Python, making it an ideal modern AI and machine learning implementation language. This allows fluent and productive code for tensor programming.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["\u003c/br\u003e\n", +"\u003ciframe width=\"85%\" src=\"https://www.youtube.com/embed/_QnbV6CAWXc\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen\u003e\u003c/iframe\u003e\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["## Interactive Notebooks\n", +"\n", +"All documentation pages in this website are interactive notebooks which you can execute directly in your browser without installing anything in your local machine.\n", +"\n", +"Using the [![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/gh-pages/index.ipynb) on the top of each page, you can execute the page as an interactive notebook running on cloud servers provided by [Google Colab](https://colab.research.google.com/).\n", +"\n", +"Using the buttons [![Script](img/badge-script.svg)](index.fsx)\n", +"[![Script](img/badge-notebook.svg)](index.ipynb) you can also download a page as a script or an interactive notebook, which you can execute locally in 
[Jupyter](https://jupyter.org/) or [Visual Studio Code](https://code.visualstudio.com/) using [dotnet interactive](https://github.com/dotnet/interactive).\n", +"\n", +"## Example\n", +"\n", +"Define and add two tensors:\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 2, "outputs": [ + { + "data": { + "text/plain": ["No value returned by any evaluator"] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["open Furnace\n", +"\n", +"let t1 = FurnaceImage.tensor [ 0.0 ..0.2.. 1.0 ] // Gives [0., 0.2, 0.4, 0.6, 0.8, 1.]\n", +"let t2 = FurnaceImage.tensor [ 1, 2, 3, 4, 5, 6 ]\n", +"\n", +"t1 + t2\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["Compute a convolution:\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 3, "outputs": [ + { + "data": { + "text/plain": ["No value returned by any evaluator"] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["let t3 = FurnaceImage.tensor [[[[0.0 .. 10.0]]]]\n", +"let t4 = FurnaceImage.tensor [[[[0.0 ..0.1.. 
1.0]]]]\n", +"\n", +"t3.conv2d(t4)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["Take the gradient of a vector-to-scalar function:\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 4, "outputs": [ + { + "data": { + "text/plain": ["No value returned by any evaluator"] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["let f (x: Tensor) = x.exp().sum()\n", +"\n", +"FurnaceImage.grad f (FurnaceImage.tensor([1.8, 2.5]))\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["Compute a nested derivative (checking for [perturbation confusion](https://doi.org/10.1007/s10990-008-9037-1)):\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 5, "outputs": [ + { + "data": { + "text/plain": ["No value returned by any evaluator"] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["let x0 = FurnaceImage.tensor(1.)\n", +"let y0 = FurnaceImage.tensor(2.)\n", +"FurnaceImage.diff (fun x -\u003e x * FurnaceImage.diff (fun y -\u003e x * y) y0) x0\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["Define a model and optimize it:\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["open Furnace\n", +"open Furnace.Data\n", +"open Furnace.Model\n", +"open Furnace.Compose\n", +"open Furnace.Util\n", +"open Furnace.Optim\n", +"\n", +"let epochs = 2\n", +"let batchSize = 32\n", +"let numBatches = 5\n", +"\n", +"let trainSet = MNIST(\"../data\", train=true, transform=id)\n", +"let trainLoader = trainSet.loader(batchSize=batchSize, shuffle=true)\n", +"\n", +"let validSet = MNIST(\"../data\", train=false, transform=id)\n", +"let validLoader = validSet.loader(batchSize=batchSize, shuffle=false)\n", +"\n", +"let encoder =\n", +" Conv2d(1, 32, 4, 2)\n", +" --\u003e FurnaceImage.relu\n", +" 
--\u003e Conv2d(32, 64, 4, 2)\n", +" --\u003e FurnaceImage.relu\n", +" --\u003e Conv2d(64, 128, 4, 2)\n", +" --\u003e FurnaceImage.flatten(1)\n", +"\n", +"let decoder =\n", +" FurnaceImage.unflatten(1, [128;1;1])\n", +" --\u003e ConvTranspose2d(128, 64, 4, 2)\n", +" --\u003e FurnaceImage.relu\n", +" --\u003e ConvTranspose2d(64, 32, 4, 3)\n", +" --\u003e FurnaceImage.relu\n", +" --\u003e ConvTranspose2d(32, 1, 4, 2)\n", +" --\u003e FurnaceImage.sigmoid\n", +"\n", +"let model = VAE([1;28;28], 64, encoder, decoder)\n", +"\n", +"let lr = FurnaceImage.tensor(0.001)\n", +"let optimizer = Adam(model, lr=lr)\n", +"\n", +"for epoch = 1 to epochs do\n", +" let batches = trainLoader.epoch(numBatches)\n", +" for i, x, _ in batches do\n", +" model.reverseDiff()\n", +" let l = model.loss(x)\n", +" l.reverse()\n", +" optimizer.step()\n", +" print $\"Epoch: {epoch} minibatch: {i} loss: {l}\" \n", +"\n", +"let validLoss = \n", +" validLoader.epoch() \n", +" |\u003e Seq.sumBy (fun (_, x, _) -\u003e model.loss(x, normalize=false))\n", +"print $\"Validation loss: {validLoss/validSet.length}\"\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["Numerous other model definition, differentiation, and training patterns are supported. 
See the tutorials in the left-hand menu and [examples](https://github.com/fsprojects/Furnace/tree/dev/examples) on GitHub.\n", +"\n", +"## More Information\n", +"\n", +"Furnace is developed by [Atılım Güneş Baydin](http://www.robots.ox.ac.uk/~gunes/), [Don Syme](https://www.microsoft.com/en-us/research/people/dsyme/)\n", +"and other contributors, having started as a project supervised by the automatic differentiation wizards [Barak Pearlmutter](https://scholar.google.com/citations?user=AxFrw0sAAAAJ\u0026hl=en) and [Jeffrey Siskind](https://scholar.google.com/citations?user=CgSBtPYAAAAJ\u0026hl=en).\n", +"\n", +"Please join us [on GitHub](https://github.com/fsprojects/Furnace)!\n", +"\n"] + }], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "language_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "F#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/index.json b/index.json new file mode 100644 index 00000000..f54a56c3 --- /dev/null +++ b/index.json @@ -0,0 +1 @@ +[{"uri":"https://fsprojects.github.io/Furnace/reference/furnace.html","title":"Furnace","content":"Backend \nCompose \nDevice \nDtype \nDtypeAutoOpens \nNumerical \nOpAvgPoolExtensions \nOpBMMExtensions \nOpDetExtensions \nOpInvExtensions \nOpNormExtensions \nOpOuterExtensions \nOpSolveExtensions \nPrinter \nScalarExtensions \nShape \nShapeAutoOpens \nShorten \nSlicingExtensions \nBackend \nBackendFunctionality\u003C\u0027T\u003E \nBinaryOp \nBinaryOpElementwise \nDevice \nDeviceType \nDtype \nFurnaceImage \nPrinter \nShape \nTensor \nTensorOp \nUnaryOp \nUnaryOpElementwise \nscalar \nImageExtensions \nImageUtil"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends.html","title":"Furnace.Backends","content":"BackendTensorStatics 
\nRawTensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data.html","title":"Furnace.Data","content":"DataLoader \nDataset \nDatasetSubset \nTensorDataset \nTextDataset \nDataUtil \nCIFAR10 \nCIFAR100 \nImageDataset \nMNIST"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions.html","title":"Furnace.Distributions","content":"Bernoulli \nCategorical \nDistribution\u003C\u0027T\u003E \nEmpirical\u003C\u0027T\u003E \nNormal \nTensorDistribution \nUniform"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model.html","title":"Furnace.Model","content":"RecurrentShape \nBatchNorm1d \nBatchNorm2d \nBatchNorm3d \nConv1d \nConv2d \nConv3d \nConvTranspose1d \nConvTranspose2d \nConvTranspose3d \nDropout \nDropout2d \nDropout3d \nLSTM \nLSTMCell \nLinear \nMode \nModel\u003C\u0027In, \u0027Out\u003E \nModel \nModelBase \nParameter \nParameterDict \nRNN \nRNNCell \nSequential \nVAE \nVAEBase \nVAEMLP \nWeight"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim.html","title":"Furnace.Optim","content":"Adam \nOptimizer \nSGD \noptim"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util.html","title":"Furnace.Util","content":"Array \nArray4D \nArray5D \nArray6D \nArrayND \nDataConverter \nDictionary \nExtensionAutoOpens \nOrderedDictionary \nRandom \nSeq \nUtilAutoOpens \nGlobalNestingLevel \nNestingLevel \nRandom \nhelpers \nPyplot"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backendmodule.html","title":"Backend","content":"Backend \n\n Contains functions and settings related to backend specifications.\n \nBackend.Register \nRegister \nBackend.Default \nDefault"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backendmodule.html#Register","title":"Backend.Register","content":"Backend.Register \nRegister \n\n Register a new 
backend\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backendmodule.html#Default","title":"Backend.Default","content":"Backend.Default \nDefault \n\n Get or set the default backend used when creating tensors. Note, use \u003Cc\u003EFurnaceImage.config(...)\u003C/c\u003E instead.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html","title":"Compose","content":"Compose \n \nCompose.tensor \ntensor \nCompose.multinomial \nmultinomial \nCompose.bernoulli \nbernoulli \nCompose.dropout \ndropout \nCompose.dropout2d \ndropout2d \nCompose.dropout3d \ndropout3d \nCompose.zerosLike \nzerosLike \nCompose.onesLike \nonesLike \nCompose.fullLike \nfullLike \nCompose.arangeLike \narangeLike \nCompose.arangeLike \narangeLike \nCompose.linspaceLike \nlinspaceLike \nCompose.linspaceLike \nlinspaceLike \nCompose.logspaceLike \nlogspaceLike \nCompose.logspaceLike \nlogspaceLike \nCompose.onehotLike \nonehotLike \nCompose.randLike \nrandLike \nCompose.randnLike \nrandnLike \nCompose.randintLike \nrandintLike \nCompose.like \nlike \nCompose.lt \nlt \nCompose.gt \ngt \nCompose.le \nle \nCompose.ge \nge \nCompose.eq \neq \nCompose.clamp \nclamp \nCompose.diagonal \ndiagonal \nCompose.expand \nexpand \nCompose.expandAs \nexpandAs \nCompose.stack \nstack \nCompose.unstack \nunstack \nCompose.cat \ncat \nCompose.split \nsplit \nCompose.add \nadd \nCompose.sub \nsub \nCompose.mul \nmul \nCompose.div \ndiv \nCompose.pow \npow \nCompose.matmul \nmatmul \nCompose.dot \ndot \nCompose.sum \nsum \nCompose.mean \nmean \nCompose.var \nvar \nCompose.std \nstd \nCompose.cov \ncov \nCompose.corrcoef \ncorrcoef \nCompose.gather \ngather \nCompose.scatter \nscatter \nCompose.transpose \ntranspose \nCompose.squeeze \nsqueeze \nCompose.unsqueeze \nunsqueeze \nCompose.unsqueezeAs \nunsqueezeAs \nCompose.flip \nflip \nCompose.dilate \ndilate \nCompose.undilate \nundilate \nCompose.repeat \nrepeat \nCompose.slice \nslice \nCompose.view \nview \nCompose.view 
\nview \nCompose.viewAs \nviewAs \nCompose.flatten \nflatten \nCompose.unflatten \nunflatten \nCompose.leakyRelu \nleakyRelu \nCompose.softmax \nsoftmax \nCompose.logsoftmax \nlogsoftmax \nCompose.logsumexp \nlogsumexp \nCompose.mseLoss \nmseLoss \nCompose.bceLoss \nbceLoss \nCompose.nllLoss \nnllLoss \nCompose.crossEntropyLoss \ncrossEntropyLoss \nCompose.maxpool1d \nmaxpool1d \nCompose.maxpool2d \nmaxpool2d \nCompose.maxpool3d \nmaxpool3d \nCompose.maxunpool1d \nmaxunpool1d \nCompose.maxunpool2d \nmaxunpool2d \nCompose.maxunpool3d \nmaxunpool3d \nCompose.conv1d \nconv1d \nCompose.conv2d \nconv2d \nCompose.conv3d \nconv3d \nCompose.convTranspose1d \nconvTranspose1d \nCompose.convTranspose2d \nconvTranspose2d \nCompose.convTranspose3d \nconvTranspose3d \nCompose.pad \npad \nCompose.toImage \ntoImage \nCompose.toImageString \ntoImageString \nCompose.cast \ncast \nCompose.move \nmove"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#tensor","title":"Compose.tensor","content":"Compose.tensor \ntensor \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#multinomial","title":"Compose.multinomial","content":"Compose.multinomial \nmultinomial \nTBDReturns a tensor where each row contains \u003Cspan class=\u0022fsdocs-param-name\u0022\u003EnumSamples\u003C/span\u003E indices sampled from the multinomial probability distribution located in the corresponding row of tensor input. 
\n\n Indices are ordered from left to right according to when each was sampled (first samples are placed in first column).\n \n If input is a vector, out is a vector of size num_samples.\n \n If input is a matrix with m rows, the result is an matrix of shape (m \u00D7 numSamples)\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#bernoulli","title":"Compose.bernoulli","content":"Compose.bernoulli \nbernoulli \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#dropout","title":"Compose.dropout","content":"Compose.dropout \ndropout \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#dropout2d","title":"Compose.dropout2d","content":"Compose.dropout2d \ndropout2d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#dropout3d","title":"Compose.dropout3d","content":"Compose.dropout3d \ndropout3d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#zerosLike","title":"Compose.zerosLike","content":"Compose.zerosLike \nzerosLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#onesLike","title":"Compose.onesLike","content":"Compose.onesLike \nonesLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#fullLike","title":"Compose.fullLike","content":"Compose.fullLike \nfullLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#arangeLike","title":"Compose.arangeLike","content":"Compose.arangeLike \narangeLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#arangeLike","title":"Compose.arangeLike","content":"Compose.arangeLike \narangeLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#linspaceLike","title":"Compose.linspaceLike","content":"Compose.linspaceLike \nlinspaceLike 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#linspaceLike","title":"Compose.linspaceLike","content":"Compose.linspaceLike \nlinspaceLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#logspaceLike","title":"Compose.logspaceLike","content":"Compose.logspaceLike \nlogspaceLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#logspaceLike","title":"Compose.logspaceLike","content":"Compose.logspaceLike \nlogspaceLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#onehotLike","title":"Compose.onehotLike","content":"Compose.onehotLike \nonehotLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#randLike","title":"Compose.randLike","content":"Compose.randLike \nrandLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#randnLike","title":"Compose.randnLike","content":"Compose.randnLike \nrandnLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#randintLike","title":"Compose.randintLike","content":"Compose.randintLike \nrandintLike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#like","title":"Compose.like","content":"Compose.like \nlike \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#lt","title":"Compose.lt","content":"Compose.lt \nlt \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#gt","title":"Compose.gt","content":"Compose.gt \ngt \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#le","title":"Compose.le","content":"Compose.le \nle \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#ge","title":"Compose.ge","content":"Compose.ge \nge 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#eq","title":"Compose.eq","content":"Compose.eq \neq \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#clamp","title":"Compose.clamp","content":"Compose.clamp \nclamp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#diagonal","title":"Compose.diagonal","content":"Compose.diagonal \ndiagonal \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#expand","title":"Compose.expand","content":"Compose.expand \nexpand \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#expandAs","title":"Compose.expandAs","content":"Compose.expandAs \nexpandAs \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#stack","title":"Compose.stack","content":"Compose.stack \nstack \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#unstack","title":"Compose.unstack","content":"Compose.unstack \nunstack \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#cat","title":"Compose.cat","content":"Compose.cat \ncat \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#split","title":"Compose.split","content":"Compose.split \nsplit \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#add","title":"Compose.add","content":"Compose.add \nadd \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#sub","title":"Compose.sub","content":"Compose.sub \nsub \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#mul","title":"Compose.mul","content":"Compose.mul \nmul \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#div","title":"Compose.div","content":"Compose.div \ndiv 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#pow","title":"Compose.pow","content":"Compose.pow \npow \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#matmul","title":"Compose.matmul","content":"Compose.matmul \nmatmul \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#dot","title":"Compose.dot","content":"Compose.dot \ndot \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#sum","title":"Compose.sum","content":"Compose.sum \nsum \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#mean","title":"Compose.mean","content":"Compose.mean \nmean \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#var","title":"Compose.var","content":"Compose.var \nvar \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#std","title":"Compose.std","content":"Compose.std \nstd \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#cov","title":"Compose.cov","content":"Compose.cov \ncov \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#corrcoef","title":"Compose.corrcoef","content":"Compose.corrcoef \ncorrcoef \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#gather","title":"Compose.gather","content":"Compose.gather \ngather \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#scatter","title":"Compose.scatter","content":"Compose.scatter \nscatter \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#transpose","title":"Compose.transpose","content":"Compose.transpose \ntranspose \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#squeeze","title":"Compose.squeeze","content":"Compose.squeeze \nsqueeze 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#unsqueeze","title":"Compose.unsqueeze","content":"Compose.unsqueeze \nunsqueeze \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#unsqueezeAs","title":"Compose.unsqueezeAs","content":"Compose.unsqueezeAs \nunsqueezeAs \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#flip","title":"Compose.flip","content":"Compose.flip \nflip \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#dilate","title":"Compose.dilate","content":"Compose.dilate \ndilate \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#undilate","title":"Compose.undilate","content":"Compose.undilate \nundilate \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#repeat","title":"Compose.repeat","content":"Compose.repeat \nrepeat \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#slice","title":"Compose.slice","content":"Compose.slice \nslice \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#view","title":"Compose.view","content":"Compose.view \nview \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#view","title":"Compose.view","content":"Compose.view \nview \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#viewAs","title":"Compose.viewAs","content":"Compose.viewAs \nviewAs \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#flatten","title":"Compose.flatten","content":"Compose.flatten \nflatten \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#unflatten","title":"Compose.unflatten","content":"Compose.unflatten \nunflatten 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#leakyRelu","title":"Compose.leakyRelu","content":"Compose.leakyRelu \nleakyRelu \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#softmax","title":"Compose.softmax","content":"Compose.softmax \nsoftmax \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#logsoftmax","title":"Compose.logsoftmax","content":"Compose.logsoftmax \nlogsoftmax \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#logsumexp","title":"Compose.logsumexp","content":"Compose.logsumexp \nlogsumexp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#mseLoss","title":"Compose.mseLoss","content":"Compose.mseLoss \nmseLoss \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#bceLoss","title":"Compose.bceLoss","content":"Compose.bceLoss \nbceLoss \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#nllLoss","title":"Compose.nllLoss","content":"Compose.nllLoss \nnllLoss \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#crossEntropyLoss","title":"Compose.crossEntropyLoss","content":"Compose.crossEntropyLoss \ncrossEntropyLoss \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#maxpool1d","title":"Compose.maxpool1d","content":"Compose.maxpool1d \nmaxpool1d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#maxpool2d","title":"Compose.maxpool2d","content":"Compose.maxpool2d \nmaxpool2d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#maxpool3d","title":"Compose.maxpool3d","content":"Compose.maxpool3d \nmaxpool3d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#maxunpool1d","title":"Compose.maxunpool1d","content":"Compose.maxunpool1d \nmaxunpool1d 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#maxunpool2d","title":"Compose.maxunpool2d","content":"Compose.maxunpool2d \nmaxunpool2d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#maxunpool3d","title":"Compose.maxunpool3d","content":"Compose.maxunpool3d \nmaxunpool3d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#conv1d","title":"Compose.conv1d","content":"Compose.conv1d \nconv1d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#conv2d","title":"Compose.conv2d","content":"Compose.conv2d \nconv2d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#conv3d","title":"Compose.conv3d","content":"Compose.conv3d \nconv3d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#convTranspose1d","title":"Compose.convTranspose1d","content":"Compose.convTranspose1d \nconvTranspose1d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#convTranspose2d","title":"Compose.convTranspose2d","content":"Compose.convTranspose2d \nconvTranspose2d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#convTranspose3d","title":"Compose.convTranspose3d","content":"Compose.convTranspose3d \nconvTranspose3d \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#pad","title":"Compose.pad","content":"Compose.pad \npad \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#toImage","title":"Compose.toImage","content":"Compose.toImage \ntoImage \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#toImageString","title":"Compose.toImageString","content":"Compose.toImageString \ntoImageString \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#cast","title":"Compose.cast","content":"Compose.cast \ncast 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-compose.html#move","title":"Compose.move","content":"Compose.move \nmove \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicemodule.html","title":"Device","content":"Device \n\n Contains functions and settings related to device specifications.\n \nDevice.Default \nDefault"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicemodule.html#Default","title":"Device.Default","content":"Device.Default \nDefault \n\n Get or set the default device used when creating tensors. Note, use \u003Cc\u003EFurnaceImage.config(...)\u003C/c\u003E instead.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypemodule.html","title":"Dtype","content":"Dtype \n\n Contains functions and settings related to tensor element types\n \nDtype.widen \nwiden \nDtype.Default \nDefault \nDtype.divisionType \ndivisionType \nDtype.(|FloatingPoint|_|) \n(|FloatingPoint|_|) \nDtype.(|Integral|_|) \n(|Integral|_|) \nDtype.(|IntegralOrBool|_|) \n(|IntegralOrBool|_|)"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypemodule.html#widen","title":"Dtype.widen","content":"Dtype.widen \nwiden \n\n Find the Dtype into which dtype1 and dtype2 can be widened\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypemodule.html#Default","title":"Dtype.Default","content":"Dtype.Default \nDefault \n\n Get or set the default element type used when creating tensors. Only floating point types are supported as the default type. 
Note, use \u003Cc\u003EFurnaceImage.config(...)\u003C/c\u003E instead.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypemodule.html#divisionType","title":"Dtype.divisionType","content":"Dtype.divisionType \ndivisionType \n\n Find the Dtype which would result from dividing tensors with dtype1 and dtype2\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypemodule.html#(|FloatingPoint|_|)","title":"Dtype.(|FloatingPoint|_|)","content":"Dtype.(|FloatingPoint|_|) \n(|FloatingPoint|_|) \n\n Matches all floating point tensor element types\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypemodule.html#(|Integral|_|)","title":"Dtype.(|Integral|_|)","content":"Dtype.(|Integral|_|) \n(|Integral|_|) \n\n Matches all integral tensor element types\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypemodule.html#(|IntegralOrBool|_|)","title":"Dtype.(|IntegralOrBool|_|)","content":"Dtype.(|IntegralOrBool|_|) \n(|IntegralOrBool|_|) \n\n Matches all integral or boolean tensor element types\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypeautoopens.html","title":"DtypeAutoOpens","content":"DtypeAutoOpens \n\n Contains global functions and settings related to tensor element types, used when writing backends.\n \nDtypeAutoOpens.opNotSupported \nopNotSupported \nDtypeAutoOpens.opNotSupportedOnDeviceType \nopNotSupportedOnDeviceType \nDtypeAutoOpens.opNotSupported2 \nopNotSupported2 \nDtypeAutoOpens.IsFloatingPoint \nIsFloatingPoint \nDtypeAutoOpens.IsFloatingPoint \nIsFloatingPoint \nDtypeAutoOpens.IsIntegral \nIsIntegral \nDtypeAutoOpens.IsIntegral \nIsIntegral"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypeautoopens.html#opNotSupported","title":"DtypeAutoOpens.opNotSupported","content":"DtypeAutoOpens.opNotSupported \nopNotSupported \n\n Raise an exception indicating the given operation is not supported for the given tensor element 
type.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypeautoopens.html#opNotSupportedOnDeviceType","title":"DtypeAutoOpens.opNotSupportedOnDeviceType","content":"DtypeAutoOpens.opNotSupportedOnDeviceType \nopNotSupportedOnDeviceType \n\n Raise an exception indicating the given operation is not supported for the given tensor device type.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypeautoopens.html#opNotSupported2","title":"DtypeAutoOpens.opNotSupported2","content":"DtypeAutoOpens.opNotSupported2 \nopNotSupported2 \n\n Raise an exception indicating the given binary operation is not supported for the two given tensor element types.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypeautoopens.html#IsFloatingPoint","title":"DtypeAutoOpens.IsFloatingPoint","content":"DtypeAutoOpens.IsFloatingPoint \nIsFloatingPoint \n\n Matches all floating point tensor element types\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypeautoopens.html#IsFloatingPoint","title":"DtypeAutoOpens.IsFloatingPoint","content":"DtypeAutoOpens.IsFloatingPoint \nIsFloatingPoint \n\n Matches all floating point tensor element types\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypeautoopens.html#IsIntegral","title":"DtypeAutoOpens.IsIntegral","content":"DtypeAutoOpens.IsIntegral \nIsIntegral \n\n Matches all integral tensor element types\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtypeautoopens.html#IsIntegral","title":"DtypeAutoOpens.IsIntegral","content":"DtypeAutoOpens.IsIntegral \nIsIntegral \n\n Matches all integral tensor element types\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html","title":"Numerical","content":"Numerical \n \nNumerical.Shorten \nShorten \nNumerical.numdiff \nnumdiff \nNumerical.numfdiff \nnumfdiff \nNumerical.numfdiff2 \nnumfdiff2 \nNumerical.numdiff2 \nnumdiff2 \nNumerical.numjacobianv \nnumjacobianv 
\nNumerical.numfjacobianv \nnumfjacobianv \nNumerical.numfjacobian \nnumfjacobian \nNumerical.numjacobian \nnumjacobian \nNumerical.numgradv \nnumgradv \nNumerical.numfgradv \nnumfgradv \nNumerical.numfgrad \nnumfgrad \nNumerical.numgrad \nnumgrad \nNumerical.numfgradhessian \nnumfgradhessian \nNumerical.numgradhessian \nnumgradhessian \nNumerical.numfhessian \nnumfhessian \nNumerical.numhessian \nnumhessian \nNumerical.numfhessianv \nnumfhessianv \nNumerical.numhessianv \nnumhessianv \nNumerical.numflaplacian \nnumflaplacian \nNumerical.numlaplacian \nnumlaplacian \nNumerical.numfcurl \nnumfcurl \nNumerical.numcurl \nnumcurl \nNumerical.numfdivergence \nnumfdivergence \nNumerical.numdivergence \nnumdivergence \nNumerical.numfcurldivergence \nnumfcurldivergence \nNumerical.numcurldivergence \nnumcurldivergence"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numdiff","title":"Numerical.numdiff","content":"Numerical.numdiff \nnumdiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfdiff","title":"Numerical.numfdiff","content":"Numerical.numfdiff \nnumfdiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfdiff2","title":"Numerical.numfdiff2","content":"Numerical.numfdiff2 \nnumfdiff2 \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numdiff2","title":"Numerical.numdiff2","content":"Numerical.numdiff2 \nnumdiff2 \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numjacobianv","title":"Numerical.numjacobianv","content":"Numerical.numjacobianv \nnumjacobianv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfjacobianv","title":"Numerical.numfjacobianv","content":"Numerical.numfjacobianv \nnumfjacobianv 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfjacobian","title":"Numerical.numfjacobian","content":"Numerical.numfjacobian \nnumfjacobian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numjacobian","title":"Numerical.numjacobian","content":"Numerical.numjacobian \nnumjacobian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numgradv","title":"Numerical.numgradv","content":"Numerical.numgradv \nnumgradv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfgradv","title":"Numerical.numfgradv","content":"Numerical.numfgradv \nnumfgradv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfgrad","title":"Numerical.numfgrad","content":"Numerical.numfgrad \nnumfgrad \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numgrad","title":"Numerical.numgrad","content":"Numerical.numgrad \nnumgrad \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfgradhessian","title":"Numerical.numfgradhessian","content":"Numerical.numfgradhessian \nnumfgradhessian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numgradhessian","title":"Numerical.numgradhessian","content":"Numerical.numgradhessian \nnumgradhessian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfhessian","title":"Numerical.numfhessian","content":"Numerical.numfhessian \nnumfhessian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numhessian","title":"Numerical.numhessian","content":"Numerical.numhessian \nnumhessian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfhessianv","title":"Numerical.numfhessianv","content":"Numerical.numfhessianv \nnumfhessianv 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numhessianv","title":"Numerical.numhessianv","content":"Numerical.numhessianv \nnumhessianv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numflaplacian","title":"Numerical.numflaplacian","content":"Numerical.numflaplacian \nnumflaplacian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numlaplacian","title":"Numerical.numlaplacian","content":"Numerical.numlaplacian \nnumlaplacian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfcurl","title":"Numerical.numfcurl","content":"Numerical.numfcurl \nnumfcurl \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numcurl","title":"Numerical.numcurl","content":"Numerical.numcurl \nnumcurl \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfdivergence","title":"Numerical.numfdivergence","content":"Numerical.numfdivergence \nnumfdivergence \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numdivergence","title":"Numerical.numdivergence","content":"Numerical.numdivergence \nnumdivergence \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numfcurldivergence","title":"Numerical.numfcurldivergence","content":"Numerical.numfcurldivergence \nnumfcurldivergence \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical.html#numcurldivergence","title":"Numerical.numcurldivergence","content":"Numerical.numcurldivergence \nnumcurldivergence \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html","title":"Shorten","content":"Shorten \n \nShorten.numgvp \nnumgvp \nShorten.numg \nnumg \nShorten.numhvp \nnumhvp \nShorten.numh \nnumh \nShorten.numgh \nnumgh \nShorten.numjvp \nnumjvp \nShorten.numj \nnumj \nShorten.numfgvp \nnumfgvp 
\nShorten.numfg \nnumfg \nShorten.numfhvp \nnumfhvp \nShorten.numfh \nnumfh \nShorten.numfgh \nnumfgh \nShorten.numfjvp \nnumfjvp \nShorten.numfj \nnumfj"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numgvp","title":"Shorten.numgvp","content":"Shorten.numgvp \nnumgvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numg","title":"Shorten.numg","content":"Shorten.numg \nnumg \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numhvp","title":"Shorten.numhvp","content":"Shorten.numhvp \nnumhvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numh","title":"Shorten.numh","content":"Shorten.numh \nnumh \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numgh","title":"Shorten.numgh","content":"Shorten.numgh \nnumgh \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numjvp","title":"Shorten.numjvp","content":"Shorten.numjvp \nnumjvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numj","title":"Shorten.numj","content":"Shorten.numj \nnumj \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numfgvp","title":"Shorten.numfgvp","content":"Shorten.numfgvp \nnumfgvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numfg","title":"Shorten.numfg","content":"Shorten.numfg \nnumfg \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numfhvp","title":"Shorten.numfhvp","content":"Shorten.numfhvp \nnumfhvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numfh","title":"Shorten.numfh","content":"Shorten.numfh \nnumfh 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numfgh","title":"Shorten.numfgh","content":"Shorten.numfgh \nnumfgh \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numfjvp","title":"Shorten.numfjvp","content":"Shorten.numfjvp \nnumfjvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-numerical-shorten.html#numfj","title":"Shorten.numfj","content":"Shorten.numfj \nnumfj \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opavgpoolextensions.html","title":"OpAvgPoolExtensions","content":"OpAvgPoolExtensions \n \nOpAvgPoolExtensions.avgpool1d \navgpool1d \nOpAvgPoolExtensions.avgpool2d \navgpool2d \nOpAvgPoolExtensions.avgpool3d \navgpool3d \nOpAvgPoolExtensions.avgpool1d \navgpool1d \nOpAvgPoolExtensions.avgpool2d \navgpool2d \nOpAvgPoolExtensions.avgpool3d \navgpool3d"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opavgpoolextensions.html#avgpool1d","title":"OpAvgPoolExtensions.avgpool1d","content":"OpAvgPoolExtensions.avgpool1d \navgpool1d \nApplies a 1D average pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opavgpoolextensions.html#avgpool2d","title":"OpAvgPoolExtensions.avgpool2d","content":"OpAvgPoolExtensions.avgpool2d \navgpool2d \nApplies a 1D average pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opavgpoolextensions.html#avgpool3d","title":"OpAvgPoolExtensions.avgpool3d","content":"OpAvgPoolExtensions.avgpool3d \navgpool3d \nApplies a 3D average pooling over an input signal composed of several input planes, returning the max indices along with the 
outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opavgpoolextensions.html#avgpool1d","title":"OpAvgPoolExtensions.avgpool1d","content":"OpAvgPoolExtensions.avgpool1d \navgpool1d \nApplies a 1D average pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opavgpoolextensions.html#avgpool2d","title":"OpAvgPoolExtensions.avgpool2d","content":"OpAvgPoolExtensions.avgpool2d \navgpool2d \nApplies a 2D average pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opavgpoolextensions.html#avgpool3d","title":"OpAvgPoolExtensions.avgpool3d","content":"OpAvgPoolExtensions.avgpool3d \navgpool3d \nApplies a 2D average pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opbmmextensions.html","title":"OpBMMExtensions","content":"OpBMMExtensions \n \nOpBMMExtensions.bmm \nbmm \nOpBMMExtensions.bmm \nbmm"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opbmmextensions.html#bmm","title":"OpBMMExtensions.bmm","content":"OpBMMExtensions.bmm \nbmm \nBatched matrix product of two tensors. Tensors \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E must be 3d tensors each containing the same number of matrices. If the tensor is a \\(b \\times n \\times m\\) tensor, and \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E is a \\(b \\times m \\times p\\) tensor, the result will be a \\(b \\times n \\times p\\) tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opbmmextensions.html#bmm","title":"OpBMMExtensions.bmm","content":"OpBMMExtensions.bmm \nbmm \nBatched matrix product of two tensors. 
Tensors \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E and \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E must be 3d tensors each containing the same number of matrices. If \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E is a \\(b \\times n \\times m\\) tensor, \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E is a \\(b \\times m \\times p\\) tensor, the result will be a \\(b \\times n \\times p\\) tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opdetextensions.html","title":"OpDetExtensions","content":"OpDetExtensions \n \nOpDetExtensions.det \ndet \nOpDetExtensions.det \ndet"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opdetextensions.html#det","title":"OpDetExtensions.det","content":"OpDetExtensions.det \ndet \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opdetextensions.html#det","title":"OpDetExtensions.det","content":"OpDetExtensions.det \ndet \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opinvextensions.html","title":"OpInvExtensions","content":"OpInvExtensions \n \nOpInvExtensions.inv \ninv \nOpInvExtensions.inv \ninv"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opinvextensions.html#inv","title":"OpInvExtensions.inv","content":"OpInvExtensions.inv \ninv \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opinvextensions.html#inv","title":"OpInvExtensions.inv","content":"OpInvExtensions.inv \ninv \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opnormextensions.html","title":"OpNormExtensions","content":"OpNormExtensions \n \nOpNormExtensions.norm \nnorm \nOpNormExtensions.norm \nnorm"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opnormextensions.html#norm","title":"OpNormExtensions.norm","content":"OpNormExtensions.norm \nnorm 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opnormextensions.html#norm","title":"OpNormExtensions.norm","content":"OpNormExtensions.norm \nnorm \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opouterextensions.html","title":"OpOuterExtensions","content":"OpOuterExtensions \n \nOpOuterExtensions.outer \nouter \nOpOuterExtensions.outer \nouter"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opouterextensions.html#outer","title":"OpOuterExtensions.outer","content":"OpOuterExtensions.outer \nouter \nOuter product of two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opouterextensions.html#outer","title":"OpOuterExtensions.outer","content":"OpOuterExtensions.outer \nouter \nOuter product of two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opsolveextensions.html","title":"OpSolveExtensions","content":"OpSolveExtensions \n \nOpSolveExtensions.solve \nsolve \nOpSolveExtensions.solve \nsolve"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opsolveextensions.html#solve","title":"OpSolveExtensions.solve","content":"OpSolveExtensions.solve \nsolve \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-opsolveextensions.html#solve","title":"OpSolveExtensions.solve","content":"OpSolveExtensions.solve \nsolve \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printermodule.html","title":"Printer","content":"Printer \n\n Contains functions and settings related to print options.\n \nPrinter.Default \nDefault"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printermodule.html#Default","title":"Printer.Default","content":"Printer.Default \nDefault \n\n Get or set the default printer used when printing tensors. 
Note, use \u003Cc\u003EFurnaceImage.config(...)\u003C/c\u003E instead.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html","title":"ScalarExtensions","content":"ScalarExtensions \n \nScalarExtensions.tryWidenScalar \ntryWidenScalar \nScalarExtensions.widenScalarForDivision \nwidenScalarForDivision \nScalarExtensions.toSingle \ntoSingle \nScalarExtensions.toDouble \ntoDouble \nScalarExtensions.toInt64 \ntoInt64 \nScalarExtensions.toInt32 \ntoInt32 \nScalarExtensions.toInt16 \ntoInt16 \nScalarExtensions.toSByte \ntoSByte \nScalarExtensions.toByte \ntoByte \nScalarExtensions.toBool \ntoBool \nScalarExtensions.sub \nsub \nScalarExtensions.log \nlog \nScalarExtensions.neg \nneg \nScalarExtensions.dtype \ndtype \nScalarExtensions.dtype \ndtype \nScalarExtensions.cast \ncast"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#tryWidenScalar","title":"ScalarExtensions.tryWidenScalar","content":"ScalarExtensions.tryWidenScalar \ntryWidenScalar \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#widenScalarForDivision","title":"ScalarExtensions.widenScalarForDivision","content":"ScalarExtensions.widenScalarForDivision \nwidenScalarForDivision \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#toSingle","title":"ScalarExtensions.toSingle","content":"ScalarExtensions.toSingle \ntoSingle \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#toDouble","title":"ScalarExtensions.toDouble","content":"ScalarExtensions.toDouble \ntoDouble \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#toInt64","title":"ScalarExtensions.toInt64","content":"ScalarExtensions.toInt64 \ntoInt64 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#toInt32","title":"ScalarExtensions.toInt32","content":"ScalarExtensions.toInt32 \ntoInt32 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#toInt16","title":"ScalarExtensions.toInt16","content":"ScalarExtensions.toInt16 \ntoInt16 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#toSByte","title":"ScalarExtensions.toSByte","content":"ScalarExtensions.toSByte \ntoSByte \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#toByte","title":"ScalarExtensions.toByte","content":"ScalarExtensions.toByte \ntoByte \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#toBool","title":"ScalarExtensions.toBool","content":"ScalarExtensions.toBool \ntoBool \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#sub","title":"ScalarExtensions.sub","content":"ScalarExtensions.sub \nsub \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#log","title":"ScalarExtensions.log","content":"ScalarExtensions.log \nlog \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#neg","title":"ScalarExtensions.neg","content":"ScalarExtensions.neg \nneg \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#dtype","title":"ScalarExtensions.dtype","content":"ScalarExtensions.dtype \ndtype \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#dtype","title":"ScalarExtensions.dtype","content":"ScalarExtensions.dtype \ndtype \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalarextensions.html#cast","title":"ScalarExtensions.cast","content":"ScalarExtensions.cast \ncast \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html","title":"Shape","content":"Shape \n\n Contains functions and values related to tensor shapes.\n \nShape.nelement \nnelement \nShape.scalar \nscalar \nShape.contains \ncontains \nShape.checkCanStack 
\ncheckCanStack \nShape.checkCanGetSlice \ncheckCanGetSlice \nShape.checkCanIndex \ncheckCanIndex \nShape.dilated \ndilated \nShape.checkCanCat \ncheckCanCat \nShape.checkCanSplit \ncheckCanSplit \nShape.checkCanUnstack \ncheckCanUnstack \nShape.computeTranspose2d \ncomputeTranspose2d \nShape.checkDeviceTypes \ncheckDeviceTypes \nShape.checkDtypes \ncheckDtypes \nShape.checkCanConv1d \ncheckCanConv1d \nShape.checkCanConv2d \ncheckCanConv2d \nShape.checkCanConv3d \ncheckCanConv3d \nShape.checkCanConvTranspose1d \ncheckCanConvTranspose1d \nShape.checkCanConvTranspose2d \ncheckCanConvTranspose2d \nShape.checkCanConvTranspose3d \ncheckCanConvTranspose3d \nShape.checkCanMaxOrAvgpool1d \ncheckCanMaxOrAvgpool1d \nShape.checkCanMaxpool1d \ncheckCanMaxpool1d \nShape.checkCanAvgpool1d \ncheckCanAvgpool1d \nShape.checkCanMaxOrAvgpool2d \ncheckCanMaxOrAvgpool2d \nShape.checkCanMaxpool2d \ncheckCanMaxpool2d \nShape.checkCanAvgpool2d \ncheckCanAvgpool2d \nShape.checkCanMaxOrAvgpool3d \ncheckCanMaxOrAvgpool3d \nShape.checkCanMaxpool3d \ncheckCanMaxpool3d \nShape.checkCanAvgpool3d \ncheckCanAvgpool3d \nShape.checkCanMaxunpool1d \ncheckCanMaxunpool1d \nShape.checkCanMaxunpool2d \ncheckCanMaxunpool2d \nShape.checkCanMaxunpool3d \ncheckCanMaxunpool3d \nShape.canExpand \ncanExpand \nShape.checkCanExpand \ncheckCanExpand \nShape.checkCanTranspose \ncheckCanTranspose \nShape.checkCanTranspose2d \ncheckCanTranspose2d \nShape.checkCanInvert \ncheckCanInvert \nShape.checkCanDet \ncheckCanDet \nShape.checkCanSolve \ncheckCanSolve \nShape.checkCanPermute \ncheckCanPermute \nShape.checkCanFlip \ncheckCanFlip \nShape.checkCanRepeat \ncheckCanRepeat \nShape.checkCanDilate \ncheckCanDilate \nShape.checkCanGather \ncheckCanGather \nShape.checkCanScatter \ncheckCanScatter \nShape.checkCanView \ncheckCanView \nShape.checkCanFlatten \ncheckCanFlatten \nShape.checkCanAddSlice \ncheckCanAddSlice \nShape.checkCanMatmul \ncheckCanMatmul \nShape.checkCanBMM \ncheckCanBMM \nShape.checkCanDot \ncheckCanDot 
\nShape.checkCanPad \ncheckCanPad \nShape.checkCanDropout \ncheckCanDropout \nShape.checkCanDropout2d \ncheckCanDropout2d \nShape.checkCanDropout3d \ncheckCanDropout3d \nShape.squeeze \nsqueeze \nShape.checkCanMinMaxReduce \ncheckCanMinMaxReduce \nShape.checkCanUnsqueeze \ncheckCanUnsqueeze \nShape.unsqueezeAs \nunsqueezeAs \nShape.locationToBounds \nlocationToBounds \nShape.flatten \nflatten \nShape.broadcast2 \nbroadcast2 \nShape.broadcastShapes \nbroadcastShapes \nShape.undilatedShape \nundilatedShape \nShape.complete \ncomplete \nShape.completeDim \ncompleteDim \nShape.completeDimUnsqueeze \ncompleteDimUnsqueeze \nShape.completeExpand \ncompleteExpand \nShape.completeSliceBounds \ncompleteSliceBounds \nShape.create \ncreate \nShape.resolve2dKernelSizes \nresolve2dKernelSizes \nShape.resolve3dKernelSizes \nresolve3dKernelSizes \nShape.resolve2dConvSizes \nresolve2dConvSizes \nShape.resolve3dConvSizes \nresolve3dConvSizes \nShape.resolve2dConvOutputPadding \nresolve2dConvOutputPadding \nShape.resolve3dConvOutputPadding \nresolve3dConvOutputPadding \nShape.resolve2dMaxPoolSizes \nresolve2dMaxPoolSizes \nShape.resolve3dMaxPoolSizes \nresolve3dMaxPoolSizes"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#nelement","title":"Shape.nelement","content":"Shape.nelement \nnelement \n\n Gets the total number of elements in the shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#scalar","title":"Shape.scalar","content":"Shape.scalar \nscalar \n\n The shape for a scalar value.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#contains","title":"Shape.contains","content":"Shape.contains \ncontains \n\n Indicates if one shape contains another.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanStack","title":"Shape.checkCanStack","content":"Shape.checkCanStack \ncheckCanStack \n\n Checks if the given shapes are appropriate for a 
stack operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanGetSlice","title":"Shape.checkCanGetSlice","content":"Shape.checkCanGetSlice \ncheckCanGetSlice \n\n Checks if the given shapes are appropriate for a GetSlice operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanIndex","title":"Shape.checkCanIndex","content":"Shape.checkCanIndex \ncheckCanIndex \n\n Checks if the given index is valid in the context of the given shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#dilated","title":"Shape.dilated","content":"Shape.dilated \ndilated \n\n Computes the shape that results from a dilation operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanCat","title":"Shape.checkCanCat","content":"Shape.checkCanCat \ncheckCanCat \n\n Checks if the given shapes are appropriate for a concatenation operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanSplit","title":"Shape.checkCanSplit","content":"Shape.checkCanSplit \ncheckCanSplit \n\n Checks if the given shapes are appropriate for a split operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanUnstack","title":"Shape.checkCanUnstack","content":"Shape.checkCanUnstack \ncheckCanUnstack \n\n Checks if the given shapes are appropriate for an unstack operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#computeTranspose2d","title":"Shape.computeTranspose2d","content":"Shape.computeTranspose2d \ncomputeTranspose2d \n\n Checks if the given 
shapes are appropriate for a transpose operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkDeviceTypes","title":"Shape.checkDeviceTypes","content":"Shape.checkDeviceTypes \ncheckDeviceTypes \n\n Checks if the two device types are equal.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkDtypes","title":"Shape.checkDtypes","content":"Shape.checkDtypes \ncheckDtypes \n\n Checks if the two tensor element types are equal.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanConv1d","title":"Shape.checkCanConv1d","content":"Shape.checkCanConv1d \ncheckCanConv1d \n\n Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanConv2d","title":"Shape.checkCanConv2d","content":"Shape.checkCanConv2d \ncheckCanConv2d \n\n Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanConv3d","title":"Shape.checkCanConv3d","content":"Shape.checkCanConv3d \ncheckCanConv3d \n\n Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanConvTranspose1d","title":"Shape.checkCanConvTranspose1d","content":"Shape.checkCanConvTranspose1d \ncheckCanConvTranspose1d \n\n Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting 
shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanConvTranspose2d","title":"Shape.checkCanConvTranspose2d","content":"Shape.checkCanConvTranspose2d \ncheckCanConvTranspose2d \n\n Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanConvTranspose3d","title":"Shape.checkCanConvTranspose3d","content":"Shape.checkCanConvTranspose3d \ncheckCanConvTranspose3d \n\n Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxOrAvgpool1d","title":"Shape.checkCanMaxOrAvgpool1d","content":"Shape.checkCanMaxOrAvgpool1d \ncheckCanMaxOrAvgpool1d \n\n Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxpool1d","title":"Shape.checkCanMaxpool1d","content":"Shape.checkCanMaxpool1d \ncheckCanMaxpool1d \n\n Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanAvgpool1d","title":"Shape.checkCanAvgpool1d","content":"Shape.checkCanAvgpool1d \ncheckCanAvgpool1d \n\n Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxOrAvgpool2d","title":"Shape.checkCanMaxOrAvgpool2d","content":"Shape.checkCanMaxOrAvgpool2d \ncheckCanMaxOrAvgpool2d \n\n Checks if the given shapes are appropriate for a maxpool operation and 
returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxpool2d","title":"Shape.checkCanMaxpool2d","content":"Shape.checkCanMaxpool2d \ncheckCanMaxpool2d \n\n Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanAvgpool2d","title":"Shape.checkCanAvgpool2d","content":"Shape.checkCanAvgpool2d \ncheckCanAvgpool2d \n\n Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxOrAvgpool3d","title":"Shape.checkCanMaxOrAvgpool3d","content":"Shape.checkCanMaxOrAvgpool3d \ncheckCanMaxOrAvgpool3d \n\n Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxpool3d","title":"Shape.checkCanMaxpool3d","content":"Shape.checkCanMaxpool3d \ncheckCanMaxpool3d \n\n Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanAvgpool3d","title":"Shape.checkCanAvgpool3d","content":"Shape.checkCanAvgpool3d \ncheckCanAvgpool3d \n\n Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxunpool1d","title":"Shape.checkCanMaxunpool1d","content":"Shape.checkCanMaxunpool1d \ncheckCanMaxunpool1d \n\n Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting 
shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxunpool2d","title":"Shape.checkCanMaxunpool2d","content":"Shape.checkCanMaxunpool2d \ncheckCanMaxunpool2d \n\n Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMaxunpool3d","title":"Shape.checkCanMaxunpool3d","content":"Shape.checkCanMaxunpool3d \ncheckCanMaxunpool3d \n\n Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#canExpand","title":"Shape.canExpand","content":"Shape.canExpand \ncanExpand \n\n Indicates if one shape can expand into another through the addition of broadcast dimensions.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanExpand","title":"Shape.checkCanExpand","content":"Shape.checkCanExpand \ncheckCanExpand \n\n Checks if one shape can expand into another through the addition of broadcast dimensions.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanTranspose","title":"Shape.checkCanTranspose","content":"Shape.checkCanTranspose \ncheckCanTranspose \n\n Checks if the given shape is appropriate for a transpose operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanTranspose2d","title":"Shape.checkCanTranspose2d","content":"Shape.checkCanTranspose2d \ncheckCanTranspose2d \n\n Checks if the given shape is appropriate for a transpose operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanInvert","title":"Shape.checkCanInvert","content":"Shape.checkCanInvert \ncheckCanInvert \n\n Checks if the given 
shape is appropriate for a transpose operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanDet","title":"Shape.checkCanDet","content":"Shape.checkCanDet \ncheckCanDet \n\n Checks if the given shape is appropriate for a determinant operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanSolve","title":"Shape.checkCanSolve","content":"Shape.checkCanSolve \ncheckCanSolve \n\n Checks if the given shapes are appropriate for a linear solve operation, and returns the resulting shape of the solution\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanPermute","title":"Shape.checkCanPermute","content":"Shape.checkCanPermute \ncheckCanPermute \n\n Checks if the given shape is appropriate for a permute operation and returns information related to the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanFlip","title":"Shape.checkCanFlip","content":"Shape.checkCanFlip \ncheckCanFlip \n\n Checks if the given shape is appropriate for a flip operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanRepeat","title":"Shape.checkCanRepeat","content":"Shape.checkCanRepeat \ncheckCanRepeat \n\n Checks if the given shape is appropriate for a repeat operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanDilate","title":"Shape.checkCanDilate","content":"Shape.checkCanDilate \ncheckCanDilate \n\n Checks if the given shape is appropriate for a dilate operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanGather","title":"Shape.checkCanGather","content":"Shape.checkCanGather \ncheckCanGather \n\n Checks if the given shape is appropriate for a gather 
operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanScatter","title":"Shape.checkCanScatter","content":"Shape.checkCanScatter \ncheckCanScatter \n\n Checks if the given shape is appropriate for a scatter operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanView","title":"Shape.checkCanView","content":"Shape.checkCanView \ncheckCanView \n\n Checks if the given shape is appropriate for a view operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanFlatten","title":"Shape.checkCanFlatten","content":"Shape.checkCanFlatten \ncheckCanFlatten \n\n Checks if the given shape is appropriate for a flatten operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanAddSlice","title":"Shape.checkCanAddSlice","content":"Shape.checkCanAddSlice \ncheckCanAddSlice \n\n Checks if the given shape is appropriate for an addSlice operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMatmul","title":"Shape.checkCanMatmul","content":"Shape.checkCanMatmul \ncheckCanMatmul \n\n Checks if the given shapes are appropriate for a matmul operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanBMM","title":"Shape.checkCanBMM","content":"Shape.checkCanBMM \ncheckCanBMM \n\n Checks if the given shapes are appropriate for a batched matrix multiplication operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanDot","title":"Shape.checkCanDot","content":"Shape.checkCanDot \ncheckCanDot \n\n Checks if the given shape is appropriate for a dot product operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanPad","title":"Shape.checkCanPad","content":"Shape.checkCanPad \ncheckCanPad \n\n Checks if the given shape is appropriate for a 
pad operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanDropout","title":"Shape.checkCanDropout","content":"Shape.checkCanDropout \ncheckCanDropout \n\n Checks if the given shape is appropriate for a dropout operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanDropout2d","title":"Shape.checkCanDropout2d","content":"Shape.checkCanDropout2d \ncheckCanDropout2d \n\n Checks if the given shape is appropriate for a dropout2d operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanDropout3d","title":"Shape.checkCanDropout3d","content":"Shape.checkCanDropout3d \ncheckCanDropout3d \n\n Checks if the given shape is appropriate for a dropout3d operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#squeeze","title":"Shape.squeeze","content":"Shape.squeeze \nsqueeze \n\n Computes the shape that results from a squeeze operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanMinMaxReduce","title":"Shape.checkCanMinMaxReduce","content":"Shape.checkCanMinMaxReduce \ncheckCanMinMaxReduce \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#checkCanUnsqueeze","title":"Shape.checkCanUnsqueeze","content":"Shape.checkCanUnsqueeze \ncheckCanUnsqueeze \n\n Checks if the given shape is appropriate for an unsqueeze operation and returns the resulting shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#unsqueezeAs","title":"Shape.unsqueezeAs","content":"Shape.unsqueezeAs \nunsqueezeAs \n\n Computes the shape that results from an unsqueezeAs operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#locationToBounds","title":"Shape.locationToBounds","content":"Shape.locationToBounds \nlocationToBounds \n\n Converts the given location to a three-element 
bounds array in the context of the given shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#flatten","title":"Shape.flatten","content":"Shape.flatten \nflatten \n\n Computes the shape that results from a flatten operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#broadcast2","title":"Shape.broadcast2","content":"Shape.broadcast2 \nbroadcast2 \n\n Finds the shape into which \u0060shape1\u0060 and \u0060shape2\u0060 can be expanded.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#broadcastShapes","title":"Shape.broadcastShapes","content":"Shape.broadcastShapes \nbroadcastShapes \n\n Finds the shape into which all the shapes can be expanded.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#undilatedShape","title":"Shape.undilatedShape","content":"Shape.undilatedShape \nundilatedShape \n\n Computes the shape that results from an undilation operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#complete","title":"Shape.complete","content":"Shape.complete \ncomplete \n\n Completes the given shape with respect to a tensor with the given number of elements.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#completeDim","title":"Shape.completeDim","content":"Shape.completeDim \ncompleteDim \n\n Completes the given shape dimension with respect to a concrete dimension.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#completeDimUnsqueeze","title":"Shape.completeDimUnsqueeze","content":"Shape.completeDimUnsqueeze \ncompleteDimUnsqueeze \n\n Completes the given shape dimension with respect to a concrete dimension, for the unsqueeze operation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#completeExpand","title":"Shape.completeExpand","content":"Shape.completeExpand \ncompleteExpand 
\n\n Completes the new shape for an expand operation based on the current shape of the tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#completeSliceBounds","title":"Shape.completeSliceBounds","content":"Shape.completeSliceBounds \ncompleteSliceBounds \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#create","title":"Shape.create","content":"Shape.create \ncreate \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#resolve2dKernelSizes","title":"Shape.resolve2dKernelSizes","content":"Shape.resolve2dKernelSizes \nresolve2dKernelSizes \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#resolve3dKernelSizes","title":"Shape.resolve3dKernelSizes","content":"Shape.resolve3dKernelSizes \nresolve3dKernelSizes \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#resolve2dConvSizes","title":"Shape.resolve2dConvSizes","content":"Shape.resolve2dConvSizes \nresolve2dConvSizes \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#resolve3dConvSizes","title":"Shape.resolve3dConvSizes","content":"Shape.resolve3dConvSizes \nresolve3dConvSizes \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#resolve2dConvOutputPadding","title":"Shape.resolve2dConvOutputPadding","content":"Shape.resolve2dConvOutputPadding \nresolve2dConvOutputPadding \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#resolve3dConvOutputPadding","title":"Shape.resolve3dConvOutputPadding","content":"Shape.resolve3dConvOutputPadding \nresolve3dConvOutputPadding \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#resolve2dMaxPoolSizes","title":"Shape.resolve2dMaxPoolSizes","content":"Shape.resolve2dMaxPoolSizes \nresolve2dMaxPoolSizes 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapemodule.html#resolve3dMaxPoolSizes","title":"Shape.resolve3dMaxPoolSizes","content":"Shape.resolve3dMaxPoolSizes \nresolve3dMaxPoolSizes \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html","title":"ShapeAutoOpens","content":"ShapeAutoOpens \n \nShapeAutoOpens.shapeLength \nshapeLength \nShapeAutoOpens.boundsIsScalar \nboundsIsScalar \nShapeAutoOpens.boundsToLocation \nboundsToLocation \nShapeAutoOpens.boundsToShape \nboundsToShape \nShapeAutoOpens.shapeToFullBounds \nshapeToFullBounds \nShapeAutoOpens.mirrorCoordinates \nmirrorCoordinates \nShapeAutoOpens.dilatedCoordinates \ndilatedCoordinates \nShapeAutoOpens.indexToFlatIndex \nindexToFlatIndex \nShapeAutoOpens.flatIndexToIndex \nflatIndexToIndex"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#shapeLength","title":"ShapeAutoOpens.shapeLength","content":"ShapeAutoOpens.shapeLength \nshapeLength \n\n Gets the total number of elements in a shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#boundsIsScalar","title":"ShapeAutoOpens.boundsIsScalar","content":"ShapeAutoOpens.boundsIsScalar \nboundsIsScalar \n\n Checks if the full bounds is a scalar location\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#boundsToLocation","title":"ShapeAutoOpens.boundsToLocation","content":"ShapeAutoOpens.boundsToLocation \nboundsToLocation \n\n Converts the array of three-position bounds specifications to a location.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#boundsToShape","title":"ShapeAutoOpens.boundsToShape","content":"ShapeAutoOpens.boundsToShape \nboundsToShape \n\n Converts the array of three-position bounds specifications to a shape without squeezing out 
scalars\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#shapeToFullBounds","title":"ShapeAutoOpens.shapeToFullBounds","content":"ShapeAutoOpens.shapeToFullBounds \nshapeToFullBounds \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#mirrorCoordinates","title":"ShapeAutoOpens.mirrorCoordinates","content":"ShapeAutoOpens.mirrorCoordinates \nmirrorCoordinates \n\n Mirrors the coordinates in the given dimensions in the context of the given shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#dilatedCoordinates","title":"ShapeAutoOpens.dilatedCoordinates","content":"ShapeAutoOpens.dilatedCoordinates \ndilatedCoordinates \n\n Dilates the given coordinates.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#indexToFlatIndex","title":"ShapeAutoOpens.indexToFlatIndex","content":"ShapeAutoOpens.indexToFlatIndex \nindexToFlatIndex \n\n Converts the given index to a flat index in the context of the given shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shapeautoopens.html#flatIndexToIndex","title":"ShapeAutoOpens.flatIndexToIndex","content":"ShapeAutoOpens.flatIndexToIndex \nflatIndexToIndex \n\n Converts the given flat index to an index in the context of the given shape.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html","title":"Shorten","content":"Shorten \n \nShorten.gvp \ngvp \nShorten.g \ng \nShorten.hvp \nhvp \nShorten.h \nh \nShorten.gh \ngh \nShorten.ghvp \nghvp \nShorten.jvp \njvp \nShorten.vjp \nvjp \nShorten.j \nj \nShorten.fgvp \nfgvp \nShorten.fg \nfg \nShorten.fgh \nfgh \nShorten.fhvp \nfhvp \nShorten.fh \nfh \nShorten.fghvp \nfghvp \nShorten.fjvp \nfjvp \nShorten.fvjp \nfvjp \nShorten.fj \nfj"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#gvp","title":"Shorten.gvp","content":"Shorten.gvp \ngvp 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#g","title":"Shorten.g","content":"Shorten.g \ng \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#hvp","title":"Shorten.hvp","content":"Shorten.hvp \nhvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#h","title":"Shorten.h","content":"Shorten.h \nh \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#gh","title":"Shorten.gh","content":"Shorten.gh \ngh \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#ghvp","title":"Shorten.ghvp","content":"Shorten.ghvp \nghvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#jvp","title":"Shorten.jvp","content":"Shorten.jvp \njvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#vjp","title":"Shorten.vjp","content":"Shorten.vjp \nvjp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#j","title":"Shorten.j","content":"Shorten.j \nj \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fgvp","title":"Shorten.fgvp","content":"Shorten.fgvp \nfgvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fg","title":"Shorten.fg","content":"Shorten.fg \nfg \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fgh","title":"Shorten.fgh","content":"Shorten.fgh \nfgh \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fhvp","title":"Shorten.fhvp","content":"Shorten.fhvp \nfhvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fh","title":"Shorten.fh","content":"Shorten.fh \nfh \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fghvp","title":"Shorten.fghvp","content":"Shorten.fghvp \nfghvp 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fjvp","title":"Shorten.fjvp","content":"Shorten.fjvp \nfjvp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fvjp","title":"Shorten.fvjp","content":"Shorten.fvjp \nfvjp \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shorten.html#fj","title":"Shorten.fj","content":"Shorten.fj \nfj \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html","title":"SlicingExtensions","content":"SlicingExtensions \n \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice 
\nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice 
\nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice \nSlicingExtensions.GetSlice \nGetSlice"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice 
\nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-slicingextensions.html#GetSlice","title":"SlicingExtensions.GetSlice","content":"SlicingExtensions.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backend.html","title":"Backend","content":"Backend \n\n Represents a backend for Furnace tensors\n \nBackend.Name \nName \nBackend.Reference \nReference \nBackend.Torch \nTorch \nBackend.Other \nOther"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backend.html#Name","title":"Backend.Name","content":"Backend.Name \nName \n\n Get the name of the backend\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backend.html#Reference","title":"Backend.Reference","content":"Backend.Reference \nReference \n\n The reference backend \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backend.html#Torch","title":"Backend.Torch","content":"Backend.Torch \nTorch \n\n The LibTorch backend \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backend.html#Other","title":"Backend.Other","content":"Backend.Other \nOther \n\n Reserved for future use\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backendfunctionality-1.html","title":"BackendFunctionality\u003C\u0027T\u003E","content":"BackendFunctionality\u003C\u0027T\u003E \n \nBackendFunctionality\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060 
\n\u0060\u0060.ctor\u0060\u0060 \nBackendFunctionality\u003C\u0027T\u003E.Get \nGet \nBackendFunctionality\u003C\u0027T\u003E.Backends \nBackends"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backendfunctionality-1.html#\u0060\u0060.ctor\u0060\u0060","title":"BackendFunctionality\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060","content":"BackendFunctionality\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backendfunctionality-1.html#Get","title":"BackendFunctionality\u003C\u0027T\u003E.Get","content":"BackendFunctionality\u003C\u0027T\u003E.Get \nGet \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backendfunctionality-1.html#Backends","title":"BackendFunctionality\u003C\u0027T\u003E.Backends","content":"BackendFunctionality\u003C\u0027T\u003E.Backends \nBackends \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html","title":"BinaryOp","content":"BinaryOp \nDefines a new op implementing a binary function and its derivatives. Instances of this class are used with the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Op\u0022\u003ETensor.Op\u003C/a\u003E method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. 
\n\u003Cp class=\u0027fsdocs-para\u0027\u003EThis type represents the most generic definition of a new op representing a binary function, allowing the specification of: (1) the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html\u0022\u003ERawTensor\u003C/a\u003E operation, (2) the derivative propagation rule for the forward differentiation mode and (3) the derivative propagation rule for the reverse differentiation mode.\u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003EIn general, if you are implementing a simple elementwise op, you should prefer using the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-binaryopelementwise.html\u0022\u003EBinaryOpElementwise\u003C/a\u003E type, which is much simpler to use.\u003C/p\u003E \nBinaryOp.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nBinaryOp.ad_dfda \nad_dfda \nBinaryOp.bd_dfdb \nbd_dfdb \nBinaryOp.fRaw \nfRaw \nBinaryOp.fd_dfda \nfd_dfda \nBinaryOp.fd_dfdb \nfd_dfdb \nBinaryOp.name \nname"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html#\u0060\u0060.ctor\u0060\u0060","title":"BinaryOp.\u0060\u0060.ctor\u0060\u0060","content":"BinaryOp.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html#ad_dfda","title":"BinaryOp.ad_dfda","content":"BinaryOp.ad_dfda \nad_dfda \nDerivative propagation rule for forward differentiation mode for the partial derivative with respect to the first argument of the function. This represents the contribution of the function\u0027s first argument \\( a \\) to the derivative of \\( f(a, b) \\) with respect a value \\( x \\) earlier in the computation graph than the function\u0027s arguments. 
In other words, it computes the first term in the right-hand side of the equation \\( \\frac{\\partial f(a, b)}{\\partial x} = \\frac{\\partial a}{\\partial x} \\frac{\\partial f(a, b)}{\\partial a} \u002B \\frac{\\partial b}{\\partial x} \\frac{\\partial f(a, b)}{\\partial b} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html#bd_dfdb","title":"BinaryOp.bd_dfdb","content":"BinaryOp.bd_dfdb \nbd_dfdb \nDerivative propagation rule for forward differentiation mode for the partial derivative with respect to the second argument of the function. This represents the contribution of the function\u0027s second argument \\( b \\) to the derivative of \\( f(a, b) \\) with respect a value \\( x \\) earlier in the computation graph than the function\u0027s arguments. In other words, it computes the second term in the right-hand side of the equation \\( \\frac{\\partial f(a, b)}{\\partial x} = \\frac{\\partial a}{\\partial x} \\frac{\\partial f(a, b)}{\\partial a} \u002B \\frac{\\partial b}{\\partial x} \\frac{\\partial f(a, b)}{\\partial b} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html#fRaw","title":"BinaryOp.fRaw","content":"BinaryOp.fRaw \nfRaw \nRawTensor operation \\( f(a, b) \\) performing the op."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html#fd_dfda","title":"BinaryOp.fd_dfda","content":"BinaryOp.fd_dfda \nfd_dfda \nDerivative propagation rule for reverse differentiation mode for the partial derivative with respect to the first argument of the function. This represents the derivative of a value \\( y \\), which comes later in the computation graph than the function\u0027s value \\( f(a, b) \\), with respect to the function\u0027s first argument \\( a \\). 
In other words, it computes \\( \\frac{\\partial y}{\\partial a} = \\frac{\\partial y}{\\partial f(a, b)} \\frac{\\partial f(a, b)}{\\partial a} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html#fd_dfdb","title":"BinaryOp.fd_dfdb","content":"BinaryOp.fd_dfdb \nfd_dfdb \nDerivative propagation rule for reverse differentiation mode for the partial derivative with respect to the second argument of the function. This represents the derivative of a value \\( y \\), which comes later in the computation graph than the function\u0027s value \\( f(a, b) \\), with respect to the function\u0027s second argument \\( b \\). In other words, it computes \\( \\frac{\\partial y}{\\partial b} = \\frac{\\partial y}{\\partial f(a, b)} \\frac{\\partial f(a, b)}{\\partial b} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html#name","title":"BinaryOp.name","content":"BinaryOp.name \nname \n\n Name of the op.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryopelementwise.html","title":"BinaryOpElementwise","content":"BinaryOpElementwise \nDefines a new op implementing an elementwise binary function and its derivatives. Instances of this class are used with the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Op\u0022\u003ETensor.Op\u003C/a\u003E method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. \n\n This type is specialized to elementwise ops. It requires the user to specify only (1) the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html\u0022\u003ERawTensor\u003C/a\u003E operation and (2) the derivative of the function with respect to each argument. 
The corresponding derivative propagation rules for the forward and reverse differentiation modes are automatically generated.\n \u003Cp class=\u0027fsdocs-para\u0027\u003EIf you are implementing a complex op that is not elementwise, you can use the generic type \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-binaryop.html\u0022\u003EBinaryOp\u003C/a\u003E, which allows you to define the full derivative propagation rules.\u003C/p\u003E \nBinaryOpElementwise.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nBinaryOpElementwise.dfda \ndfda \nBinaryOpElementwise.dfdb \ndfdb"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryopelementwise.html#\u0060\u0060.ctor\u0060\u0060","title":"BinaryOpElementwise.\u0060\u0060.ctor\u0060\u0060","content":"BinaryOpElementwise.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryopelementwise.html#dfda","title":"BinaryOpElementwise.dfda","content":"BinaryOpElementwise.dfda \ndfda \nDerivative of the function with respect to its first argument, \\( \\frac{\\partial f(a, b)}{\\partial a} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-binaryopelementwise.html#dfdb","title":"BinaryOpElementwise.dfdb","content":"BinaryOpElementwise.dfdb \ndfdb \nDerivative of the function with respect to its second argument, \\( \\frac{\\partial f(a, b)}{\\partial b} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-device.html","title":"Device","content":"Device \n\n Represents a device specification.\n \nDevice.DeviceType \nDeviceType \nDevice.DeviceIndex \nDeviceIndex \nDevice.CPU \nCPU \nDevice.GPU \nGPU \nDevice.Device \nDevice"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-device.html#DeviceType","title":"Device.DeviceType","content":"Device.DeviceType \nDeviceType 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-device.html#DeviceIndex","title":"Device.DeviceIndex","content":"Device.DeviceIndex \nDeviceIndex \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-device.html#CPU","title":"Device.CPU","content":"Device.CPU \nCPU \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-device.html#GPU","title":"Device.GPU","content":"Device.GPU \nGPU \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-device.html#Device","title":"Device.Device","content":"Device.Device \nDevice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html","title":"DeviceType","content":"DeviceType \n\n Represents the type of a device. \n \n\n The numeric values used are as for LibTorch.\n \nDeviceType.CPU \nCPU \nDeviceType.CUDA \nCUDA \nDeviceType.MKLDNN \nMKLDNN \nDeviceType.OPENGL \nOPENGL \nDeviceType.OPENCL \nOPENCL \nDeviceType.IDEEP \nIDEEP \nDeviceType.HIP \nHIP \nDeviceType.FPGA \nFPGA \nDeviceType.MSNPU \nMSNPU \nDeviceType.XLA \nXLA"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#CPU","title":"DeviceType.CPU","content":"DeviceType.CPU \nCPU \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#CUDA","title":"DeviceType.CUDA","content":"DeviceType.CUDA \nCUDA \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#MKLDNN","title":"DeviceType.MKLDNN","content":"DeviceType.MKLDNN \nMKLDNN \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#OPENGL","title":"DeviceType.OPENGL","content":"DeviceType.OPENGL \nOPENGL \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#OPENCL","title":"DeviceType.OPENCL","content":"DeviceType.OPENCL \nOPENCL \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#IDEEP","title":"DeviceType.IDEEP","content":"DeviceType.IDEEP \nIDEEP 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#HIP","title":"DeviceType.HIP","content":"DeviceType.HIP \nHIP \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#FPGA","title":"DeviceType.FPGA","content":"DeviceType.FPGA \nFPGA \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#MSNPU","title":"DeviceType.MSNPU","content":"DeviceType.MSNPU \nMSNPU \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-devicetype.html#XLA","title":"DeviceType.XLA","content":"DeviceType.XLA \nXLA \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html","title":"Dtype","content":"Dtype \n\n Represents a storage type for elements of a tensor\n \nDtype.SummationType \nSummationType \nDtype.BFloat16 \nBFloat16 \nDtype.Float16 \nFloat16 \nDtype.Float32 \nFloat32 \nDtype.Float64 \nFloat64 \nDtype.Int8 \nInt8 \nDtype.Byte \nByte \nDtype.Int16 \nInt16 \nDtype.Int32 \nInt32 \nDtype.Int64 \nInt64 \nDtype.Bool \nBool"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#SummationType","title":"Dtype.SummationType","content":"Dtype.SummationType \nSummationType \n\n Gets the natural result of the Sum(), SumToSize() and Sum(dim) operation on this dtype\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#BFloat16","title":"Dtype.BFloat16","content":"Dtype.BFloat16 \nBFloat16 \n\n Store elements as 16-bit floating point numbers (bfloat16 variation)\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Float16","title":"Dtype.Float16","content":"Dtype.Float16 \nFloat16 \n\n Store elements as 16-bit floating point numbers\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Float32","title":"Dtype.Float32","content":"Dtype.Float32 \nFloat32 \n\n Store elements as 32-bit floating point 
numbers\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Float64","title":"Dtype.Float64","content":"Dtype.Float64 \nFloat64 \n\n Store elements as 64-bit floating point numbers\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Int8","title":"Dtype.Int8","content":"Dtype.Int8 \nInt8 \n\n Store elements as 8-bit integers\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Byte","title":"Dtype.Byte","content":"Dtype.Byte \nByte \n\n Store elements as 8-bit unsigned integers\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Int16","title":"Dtype.Int16","content":"Dtype.Int16 \nInt16 \n\n Store elements as 16-bit signed integers\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Int32","title":"Dtype.Int32","content":"Dtype.Int32 \nInt32 \n\n Store elements as 32-bit signed integers\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Int64","title":"Dtype.Int64","content":"Dtype.Int64 \nInt64 \n\n Store elements as 64-bit signed integers\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-dtype.html#Bool","title":"Dtype.Bool","content":"Dtype.Bool \nBool \n\n Store elements as booleans\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html","title":"FurnaceImage","content":"FurnaceImage \n\n Tensor operations\n \nFurnaceImage.abs \nabs \nFurnaceImage.acos \nacos \nFurnaceImage.add \nadd \nFurnaceImage.arange \narange \nFurnaceImage.arange \narange \nFurnaceImage.arangeLike \narangeLike \nFurnaceImage.arangeLike \narangeLike \nFurnaceImage.argmax \nargmax \nFurnaceImage.argmax \nargmax \nFurnaceImage.argmin \nargmin \nFurnaceImage.argmin \nargmin \nFurnaceImage.asin \nasin \nFurnaceImage.atan \natan \nFurnaceImage.backends \nbackends \nFurnaceImage.backendsAndDevices \nbackendsAndDevices \nFurnaceImage.bceLoss \nbceLoss \nFurnaceImage.bernoulli \nbernoulli 
\nFurnaceImage.cast \ncast \nFurnaceImage.cat \ncat \nFurnaceImage.ceil \nceil \nFurnaceImage.clamp \nclamp \nFurnaceImage.clone \nclone \nFurnaceImage.config \nconfig \nFurnaceImage.config \nconfig \nFurnaceImage.config \nconfig \nFurnaceImage.conv1d \nconv1d \nFurnaceImage.conv2d \nconv2d \nFurnaceImage.conv3d \nconv3d \nFurnaceImage.convTranspose1d \nconvTranspose1d \nFurnaceImage.convTranspose2d \nconvTranspose2d \nFurnaceImage.convTranspose3d \nconvTranspose3d \nFurnaceImage.corrcoef \ncorrcoef \nFurnaceImage.cos \ncos \nFurnaceImage.cosh \ncosh \nFurnaceImage.cov \ncov \nFurnaceImage.create \ncreate \nFurnaceImage.crossEntropyLoss \ncrossEntropyLoss \nFurnaceImage.curl \ncurl \nFurnaceImage.curldivergence \ncurldivergence \nFurnaceImage.derivative \nderivative \nFurnaceImage.devices \ndevices \nFurnaceImage.diagonal \ndiagonal \nFurnaceImage.diff \ndiff \nFurnaceImage.diff2 \ndiff2 \nFurnaceImage.diffn \ndiffn \nFurnaceImage.dilate \ndilate \nFurnaceImage.div \ndiv \nFurnaceImage.divergence \ndivergence \nFurnaceImage.dot \ndot \nFurnaceImage.dropout \ndropout \nFurnaceImage.dropout2d \ndropout2d \nFurnaceImage.dropout3d \ndropout3d \nFurnaceImage.empty \nempty \nFurnaceImage.empty \nempty \nFurnaceImage.empty \nempty \nFurnaceImage.eq \neq \nFurnaceImage.evalForwardDiff \nevalForwardDiff \nFurnaceImage.evalForwardDiffs \nevalForwardDiffs \nFurnaceImage.evalReverseDiff \nevalReverseDiff \nFurnaceImage.exp \nexp \nFurnaceImage.expand \nexpand \nFurnaceImage.expandAs \nexpandAs \nFurnaceImage.eye \neye \nFurnaceImage.fcurl \nfcurl \nFurnaceImage.fcurldivergence \nfcurldivergence \nFurnaceImage.fdiff \nfdiff \nFurnaceImage.fdiff2 \nfdiff2 \nFurnaceImage.fdiffn \nfdiffn \nFurnaceImage.fdivergence \nfdivergence \nFurnaceImage.ffdiffn \nffdiffn \nFurnaceImage.fgrad \nfgrad \nFurnaceImage.fgradhessian \nfgradhessian \nFurnaceImage.fgradhessianv \nfgradhessianv \nFurnaceImage.fgradv \nfgradv \nFurnaceImage.fhessian \nfhessian \nFurnaceImage.fhessianv \nfhessianv 
\nFurnaceImage.fjacobian \nfjacobian \nFurnaceImage.fjacobianTv \nfjacobianTv \nFurnaceImage.fjacobianv \nfjacobianv \nFurnaceImage.flaplacian \nflaplacian \nFurnaceImage.flatten \nflatten \nFurnaceImage.flip \nflip \nFurnaceImage.floor \nfloor \nFurnaceImage.forwardDiff \nforwardDiff \nFurnaceImage.full \nfull \nFurnaceImage.full \nfull \nFurnaceImage.fullLike \nfullLike \nFurnaceImage.gather \ngather \nFurnaceImage.ge \nge \nFurnaceImage.grad \ngrad \nFurnaceImage.gradhessian \ngradhessian \nFurnaceImage.gradhessianv \ngradhessianv \nFurnaceImage.gradv \ngradv \nFurnaceImage.gt \ngt \nFurnaceImage.hasinf \nhasinf \nFurnaceImage.hasnan \nhasnan \nFurnaceImage.hessian \nhessian \nFurnaceImage.hessianv \nhessianv \nFurnaceImage.init \ninit \nFurnaceImage.init2d \ninit2d \nFurnaceImage.init3d \ninit3d \nFurnaceImage.init4d \ninit4d \nFurnaceImage.isBackendAvailable \nisBackendAvailable \nFurnaceImage.isCudaAvailable \nisCudaAvailable \nFurnaceImage.isDeviceAvailable \nisDeviceAvailable \nFurnaceImage.isDeviceTypeAvailable \nisDeviceTypeAvailable \nFurnaceImage.isTensor \nisTensor \nFurnaceImage.isinf \nisinf \nFurnaceImage.isnan \nisnan \nFurnaceImage.jacobian \njacobian \nFurnaceImage.jacobianTv \njacobianTv \nFurnaceImage.jacobianv \njacobianv \nFurnaceImage.laplacian \nlaplacian \nFurnaceImage.le \nle \nFurnaceImage.leakyRelu \nleakyRelu \nFurnaceImage.like \nlike \nFurnaceImage.linspace \nlinspace \nFurnaceImage.linspace \nlinspace \nFurnaceImage.load \nload \nFurnaceImage.log \nlog \nFurnaceImage.log10 \nlog10 \nFurnaceImage.logsoftmax \nlogsoftmax \nFurnaceImage.logspace \nlogspace \nFurnaceImage.logspace \nlogspace \nFurnaceImage.logsumexp \nlogsumexp \nFurnaceImage.lt \nlt \nFurnaceImage.map \nmap \nFurnaceImage.map2 \nmap2 \nFurnaceImage.map3 \nmap3 \nFurnaceImage.mapi \nmapi \nFurnaceImage.mapi2 \nmapi2 \nFurnaceImage.mapi3 \nmapi3 \nFurnaceImage.matmul \nmatmul \nFurnaceImage.max \nmax \nFurnaceImage.max \nmax \nFurnaceImage.max \nmax 
\nFurnaceImage.maxpool1d \nmaxpool1d \nFurnaceImage.maxpool1di \nmaxpool1di \nFurnaceImage.maxpool2d \nmaxpool2d \nFurnaceImage.maxpool2di \nmaxpool2di \nFurnaceImage.maxpool3d \nmaxpool3d \nFurnaceImage.maxpool3di \nmaxpool3di \nFurnaceImage.maxunpool1d \nmaxunpool1d \nFurnaceImage.maxunpool2d \nmaxunpool2d \nFurnaceImage.maxunpool3d \nmaxunpool3d \nFurnaceImage.mean \nmean \nFurnaceImage.mean \nmean \nFurnaceImage.min \nmin \nFurnaceImage.min \nmin \nFurnaceImage.min \nmin \nFurnaceImage.move \nmove \nFurnaceImage.mseLoss \nmseLoss \nFurnaceImage.mul \nmul \nFurnaceImage.multinomial \nmultinomial \nFurnaceImage.ne \nne \nFurnaceImage.neg \nneg \nFurnaceImage.nelement \nnelement \nFurnaceImage.nest \nnest \nFurnaceImage.nest \nnest \nFurnaceImage.nestLevel \nnestLevel \nFurnaceImage.nestReset \nnestReset \nFurnaceImage.nllLoss \nnllLoss \nFurnaceImage.noDiff \nnoDiff \nFurnaceImage.normalize \nnormalize \nFurnaceImage.one \none \nFurnaceImage.oneLike \noneLike \nFurnaceImage.onehot \nonehot \nFurnaceImage.onehotLike \nonehotLike \nFurnaceImage.ones \nones \nFurnaceImage.ones \nones \nFurnaceImage.onesLike \nonesLike \nFurnaceImage.pad \npad \nFurnaceImage.permute \npermute \nFurnaceImage.pow \npow \nFurnaceImage.primal \nprimal \nFurnaceImage.primalDerivative \nprimalDerivative \nFurnaceImage.rand \nrand \nFurnaceImage.rand \nrand \nFurnaceImage.randLike \nrandLike \nFurnaceImage.randint \nrandint \nFurnaceImage.randint \nrandint \nFurnaceImage.randintLike \nrandintLike \nFurnaceImage.randn \nrandn \nFurnaceImage.randn \nrandn \nFurnaceImage.randnLike \nrandnLike \nFurnaceImage.relu \nrelu \nFurnaceImage.repeat \nrepeat \nFurnaceImage.reverse \nreverse \nFurnaceImage.reverseDiff \nreverseDiff \nFurnaceImage.reversePush \nreversePush \nFurnaceImage.reverseReset \nreverseReset \nFurnaceImage.round \nround \nFurnaceImage.safelog \nsafelog \nFurnaceImage.save \nsave \nFurnaceImage.scalar \nscalar \nFurnaceImage.scatter \nscatter \nFurnaceImage.seed \nseed 
\nFurnaceImage.sigmoid \nsigmoid \nFurnaceImage.sign \nsign \nFurnaceImage.sin \nsin \nFurnaceImage.sinh \nsinh \nFurnaceImage.slice \nslice \nFurnaceImage.softmax \nsoftmax \nFurnaceImage.softplus \nsoftplus \nFurnaceImage.split \nsplit \nFurnaceImage.sqrt \nsqrt \nFurnaceImage.squeeze \nsqueeze \nFurnaceImage.stack \nstack \nFurnaceImage.standardize \nstandardize \nFurnaceImage.std \nstd \nFurnaceImage.std \nstd \nFurnaceImage.sub \nsub \nFurnaceImage.sum \nsum \nFurnaceImage.sum \nsum \nFurnaceImage.tan \ntan \nFurnaceImage.tanh \ntanh \nFurnaceImage.tensor \ntensor \nFurnaceImage.toImage \ntoImage \nFurnaceImage.toImageString \ntoImageString \nFurnaceImage.trace \ntrace \nFurnaceImage.transpose \ntranspose \nFurnaceImage.transpose \ntranspose \nFurnaceImage.undilate \nundilate \nFurnaceImage.unflatten \nunflatten \nFurnaceImage.unsqueeze \nunsqueeze \nFurnaceImage.unsqueezeAs \nunsqueezeAs \nFurnaceImage.unstack \nunstack \nFurnaceImage.var \nvar \nFurnaceImage.var \nvar \nFurnaceImage.view \nview \nFurnaceImage.view \nview \nFurnaceImage.viewAs \nviewAs \nFurnaceImage.zero \nzero \nFurnaceImage.zeroCreate \nzeroCreate \nFurnaceImage.zeroLike \nzeroLike \nFurnaceImage.zeros \nzeros \nFurnaceImage.zeros \nzeros \nFurnaceImage.zerosLike \nzerosLike \nFurnaceImage.version \nversion"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#abs","title":"FurnaceImage.abs","content":"FurnaceImage.abs \nabs \nComputes the element-wise absolute value of the given input tensor. 
\nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#acos","title":"FurnaceImage.acos","content":"FurnaceImage.acos \nacos \nReturns a new tensor with the arccosine of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#add","title":"FurnaceImage.add","content":"FurnaceImage.add \nadd \nReturn the element-wise addition of the two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#arange","title":"FurnaceImage.arange","content":"FurnaceImage.arange \narange \n\n Returns a 1-D tensor of size \\(\\left\\lceil \\frac{\\text{end} - \\text{start}}{\\text{step}} \\right\\rceil\\)\n with values from the interval [start, end) taken with common difference step beginning from start.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#arange","title":"FurnaceImage.arange","content":"FurnaceImage.arange \narange \n\n Returns a 1-D tensor of size \\(\\left\\lceil \\frac{\\text{end} - \\text{start}}{\\text{step}} \\right\\rceil\\)\n with values from the interval [start, end) taken with common difference step beginning from start.\n \n\n Non-integer steps may be subject to floating point rounding errors when comparing against end.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#arangeLike","title":"FurnaceImage.arangeLike","content":"FurnaceImage.arangeLike \narangeLike \n\n A version of FurnaceImage.arange with characteristics based on the input tensor.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#arangeLike","title":"FurnaceImage.arangeLike","content":"FurnaceImage.arangeLike \narangeLike \n\n A version of FurnaceImage.arange with characteristics based on the input tensor.\n 
"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#argmax","title":"FurnaceImage.argmax","content":"FurnaceImage.argmax \nargmax \nReturns the indices of the maximum value of all elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#argmax","title":"FurnaceImage.argmax","content":"FurnaceImage.argmax \nargmax \nReturns the indices of the maximum value of all elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#argmin","title":"FurnaceImage.argmin","content":"FurnaceImage.argmin \nargmin \nReturns the indices of the minimum value of all elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#argmin","title":"FurnaceImage.argmin","content":"FurnaceImage.argmin \nargmin \nReturns the indices of the minimum value of all elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#asin","title":"FurnaceImage.asin","content":"FurnaceImage.asin \nasin \nReturns a new tensor with the arcsine of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#atan","title":"FurnaceImage.atan","content":"FurnaceImage.atan \natan \nReturns a new tensor with the arctangent of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#backends","title":"FurnaceImage.backends","content":"FurnaceImage.backends \nbackends \nReturns the list of available backends."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#backendsAndDevices","title":"FurnaceImage.backendsAndDevices","content":"FurnaceImage.backendsAndDevices \nbackendsAndDevices \nReturns the list of available backends and devices available for each 
backend."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#bceLoss","title":"FurnaceImage.bceLoss","content":"FurnaceImage.bceLoss \nbceLoss \nCreates a criterion that measures the Binary Cross Entropy between the target and the output"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#bernoulli","title":"FurnaceImage.bernoulli","content":"FurnaceImage.bernoulli \nbernoulli \nDraws binary random numbers (0 or 1) from a Bernoulli distribution"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#cast","title":"FurnaceImage.cast","content":"FurnaceImage.cast \ncast \nConvert the tensor to one with the given element type. \nIf the element type is unchanged the input tensor will be returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#cat","title":"FurnaceImage.cat","content":"FurnaceImage.cat \ncat \nConcatenates the given sequence of seq tensors in the given dimension. All tensors must either have the same shape (except in the concatenating dimension) or be empty."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#ceil","title":"FurnaceImage.ceil","content":"FurnaceImage.ceil \nceil \nReturns a new tensor with the ceil of the elements of input, the smallest integer greater than or equal to each element. 
\nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#clamp","title":"FurnaceImage.clamp","content":"FurnaceImage.clamp \nclamp \nClamp all elements in input into the range [ low..high] and return a resulting tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#clone","title":"FurnaceImage.clone","content":"FurnaceImage.clone \nclone \nReturns a new tensor with the same characteristics and storage cloned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#config","title":"FurnaceImage.config","content":"FurnaceImage.config \nconfig \nConfigure the default device, element type, backend, printer. Only floating point dtypes are supported as the default."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#config","title":"FurnaceImage.config","content":"FurnaceImage.config \nconfig \nReturn the current default device, element type, backend, and printer."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#config","title":"FurnaceImage.config","content":"FurnaceImage.config \nconfig \nConfigure the default device, dtype, and/or backend."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#conv1d","title":"FurnaceImage.conv1d","content":"FurnaceImage.conv1d \nconv1d \nApplies a 1D convolution over an input signal composed of several input planes"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#conv2d","title":"FurnaceImage.conv2d","content":"FurnaceImage.conv2d \nconv2d \nApplies a 2D convolution over an input signal composed of several input planes"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#conv3d","title":"FurnaceImage.conv3d","content":"FurnaceImage.conv3d \nconv3d \nApplies a 3D convolution over an input signal composed of several input 
planes"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#convTranspose1d","title":"FurnaceImage.convTranspose1d","content":"FurnaceImage.convTranspose1d \nconvTranspose1d \nApplies a 1D transposed convolution operator over an input signal composed of several input planes, sometimes also called \u0027deconvolution\u0027."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#convTranspose2d","title":"FurnaceImage.convTranspose2d","content":"FurnaceImage.convTranspose2d \nconvTranspose2d \nApplies a 2D transposed convolution operator over an input signal composed of several input planes, sometimes also called \u0027deconvolution\u0027."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#convTranspose3d","title":"FurnaceImage.convTranspose3d","content":"FurnaceImage.convTranspose3d \nconvTranspose3d \nApplies a 3D transposed convolution operator over an input signal composed of several input planes, sometimes also called \u0027deconvolution\u0027."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#corrcoef","title":"FurnaceImage.corrcoef","content":"FurnaceImage.corrcoef \ncorrcoef \n\n Estimates the Pearson correlation coefficient matrix for the given tensor. 
The tensor\u0027s first\n dimension should index variables and the second dimension should\n index observations for each variable.\n \n\n The correlation between variables \\(x\\) and \\(y\\) is\n \\[cor(x,y)= \\frac{\\sum^{N}_{i = 1}(x_{i} - \\mu_x)(y_{i} - \\mu_y)}{\\sigma_x \\sigma_y (N ~-~1)}\\]\n where \\(\\mu_x\\) and \\(\\mu_y\\) are the sample means and \\(\\sigma_x\\) and \\(\\sigma_x\\) are \n the sample standard deviations.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#cos","title":"FurnaceImage.cos","content":"FurnaceImage.cos \ncos \nReturns a new tensor with the cosine of the elements of input"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#cosh","title":"FurnaceImage.cosh","content":"FurnaceImage.cosh \ncosh \nReturns a new tensor with the hyperbolic cosine of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#cov","title":"FurnaceImage.cov","content":"FurnaceImage.cov \ncov \n\n Estimates the covariance matrix of the given tensor. The tensor\u0027s first\n dimension should index variables and the second dimension should\n index observations for each variable.\n \n\n If no weights are given, the covariance between variables \\(x\\) and \\(y\\) is\n \\[cov(x,y)= \\frac{\\sum^{N}_{i = 1}(x_{i} - \\mu_x)(y_{i} - \\mu_y)}{N~-~\\text{correction}}\\]\n where \\(\\mu_x\\) and \\(\\mu_y\\) are the sample means.\n \n If there are fweights or aweights then the covariance is\n \\[cov(x,y)=\\frac{\\sum^{N}_{i = 1}w_i(x_{i} - \\mu_x^*)(y_{i} - \\mu_y^*)}{\\text{normalization factor}}\\]\n where \\(w\\) is either fweights or aweights if one weight type is provided.\n If both weight types are provided \\(w=\\text{fweights}\\times\\text{aweights}\\). 
\n \\(\\mu_x^* = \\frac{\\sum^{N}_{i = 1}w_ix_{i} }{\\sum^{N}_{i = 1}w_i}\\)\n is the weighted mean of variables.\n The normalization factor is \\(\\sum^{N}_{i=1} w_i\\) if only fweights are provided or if aweights are provided and \u003Ccode\u003Ecorrection=0\u003C/code\u003E. \n Otherwise if aweights \\(aw\\) are provided the normalization factor is\n \\(\\sum^N_{i=1} w_i - \\text{correction}\\times\\frac{\\sum^N_{i=1} w_i aw_i}{\\sum^N_{i=1} w_i}\\) \n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#create","title":"FurnaceImage.create","content":"FurnaceImage.create \ncreate \nCreate a new 1D tensor using the given value for each element."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#crossEntropyLoss","title":"FurnaceImage.crossEntropyLoss","content":"FurnaceImage.crossEntropyLoss \ncrossEntropyLoss \nThis criterion combines logsoftmax and nllLoss in a single function"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#curl","title":"FurnaceImage.curl","content":"FurnaceImage.curl \ncurl \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#curldivergence","title":"FurnaceImage.curldivergence","content":"FurnaceImage.curldivergence \ncurldivergence \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#derivative","title":"FurnaceImage.derivative","content":"FurnaceImage.derivative \nderivative \nGet the derivative value of the tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#devices","title":"FurnaceImage.devices","content":"FurnaceImage.devices \ndevices \nReturns the list of available devices for a given backend."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#diagonal","title":"FurnaceImage.diagonal","content":"FurnaceImage.diagonal \ndiagonal \n\n Returns a tensor with the diagonal elements with respect to 
\u003Ccode\u003Edim1\u003C/code\u003E and \u003Ccode\u003Edim2\u003C/code\u003E.\n The argument offset controls which diagonal to consider.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#diff","title":"FurnaceImage.diff","content":"FurnaceImage.diff \ndiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#diff2","title":"FurnaceImage.diff2","content":"FurnaceImage.diff2 \ndiff2 \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#diffn","title":"FurnaceImage.diffn","content":"FurnaceImage.diffn \ndiffn \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#dilate","title":"FurnaceImage.dilate","content":"FurnaceImage.dilate \ndilate \nDilate the tensor in using the given dilations in each corresponding dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#div","title":"FurnaceImage.div","content":"FurnaceImage.div \ndiv \nReturn the element-wise division of the two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#divergence","title":"FurnaceImage.divergence","content":"FurnaceImage.divergence \ndivergence \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#dot","title":"FurnaceImage.dot","content":"FurnaceImage.dot \ndot \nComputes the dot product (inner product) of two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#dropout","title":"FurnaceImage.dropout","content":"FurnaceImage.dropout \ndropout \nRandomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#dropout2d","title":"FurnaceImage.dropout2d","content":"FurnaceImage.dropout2d \ndropout2d \nRandomly zero out entire channels (a channel is a 2D feature map, e.g., 
the jj -th channel of the ii -th sample in the batched input is a 2D tensor \\text{input}[i, j]input[i,j] ). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#dropout3d","title":"FurnaceImage.dropout3d","content":"FurnaceImage.dropout3d \ndropout3d \nRandomly zero out entire channels (a channel is a 3D feature map, e.g., the jj -th channel of the ii -th sample in the batched input is a 3D tensor \\text{input}[i, j]input[i,j] ). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#empty","title":"FurnaceImage.empty","content":"FurnaceImage.empty \nempty \nReturns a new empty tensor holding no data, for the given element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#empty","title":"FurnaceImage.empty","content":"FurnaceImage.empty \nempty \nReturns a new uninitialized tensor filled with arbitrary values for the given length, element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#empty","title":"FurnaceImage.empty","content":"FurnaceImage.empty \nempty \nReturns a new uninitialized tensor filled with arbitrary values for the given shape, element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#eq","title":"FurnaceImage.eq","content":"FurnaceImage.eq \neq \nReturns a boolean tensor for the element-wise equality comparison of the elements in the two tensors. 
\nThe shapes of input and other don\u2019t need to match, but they must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#evalForwardDiff","title":"FurnaceImage.evalForwardDiff","content":"FurnaceImage.evalForwardDiff \nevalForwardDiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#evalForwardDiffs","title":"FurnaceImage.evalForwardDiffs","content":"FurnaceImage.evalForwardDiffs \nevalForwardDiffs \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#evalReverseDiff","title":"FurnaceImage.evalReverseDiff","content":"FurnaceImage.evalReverseDiff \nevalReverseDiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#exp","title":"FurnaceImage.exp","content":"FurnaceImage.exp \nexp \nApplies the exp function element-wise."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#expand","title":"FurnaceImage.expand","content":"FurnaceImage.expand \nexpand \nReturns a new view of the input tensor with singleton dimensions expanded to a larger size"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#expandAs","title":"FurnaceImage.expandAs","content":"FurnaceImage.expandAs \nexpandAs \nExpand the input tensor to the same size as other tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#eye","title":"FurnaceImage.eye","content":"FurnaceImage.eye \neye \nReturns a 2-D tensor with ones on the diagonal and zeros elsewhere."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fcurl","title":"FurnaceImage.fcurl","content":"FurnaceImage.fcurl \nfcurl \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fcurldivergence","title":"FurnaceImage.fcurldivergence","content":"FurnaceImage.fcurldivergence \nfcurldivergence 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fdiff","title":"FurnaceImage.fdiff","content":"FurnaceImage.fdiff \nfdiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fdiff2","title":"FurnaceImage.fdiff2","content":"FurnaceImage.fdiff2 \nfdiff2 \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fdiffn","title":"FurnaceImage.fdiffn","content":"FurnaceImage.fdiffn \nfdiffn \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fdivergence","title":"FurnaceImage.fdivergence","content":"FurnaceImage.fdivergence \nfdivergence \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#ffdiffn","title":"FurnaceImage.ffdiffn","content":"FurnaceImage.ffdiffn \nffdiffn \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fgrad","title":"FurnaceImage.fgrad","content":"FurnaceImage.fgrad \nfgrad \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fgradhessian","title":"FurnaceImage.fgradhessian","content":"FurnaceImage.fgradhessian \nfgradhessian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fgradhessianv","title":"FurnaceImage.fgradhessianv","content":"FurnaceImage.fgradhessianv \nfgradhessianv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fgradv","title":"FurnaceImage.fgradv","content":"FurnaceImage.fgradv \nfgradv \nTBD \nThe \u003Ccode\u003Ex\u003C/code\u003E and \u003Ccode\u003Ev\u003C/code\u003E tensors should have the same number of elements."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fhessian","title":"FurnaceImage.fhessian","content":"FurnaceImage.fhessian \nfhessian 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fhessianv","title":"FurnaceImage.fhessianv","content":"FurnaceImage.fhessianv \nfhessianv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fjacobian","title":"FurnaceImage.fjacobian","content":"FurnaceImage.fjacobian \nfjacobian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fjacobianTv","title":"FurnaceImage.fjacobianTv","content":"FurnaceImage.fjacobianTv \nfjacobianTv \nOriginal value and transposed Jacobian-vector product of a vector-to-vector function \u0060f\u0060, at point \u0060x\u0060, along vector \u0060v\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fjacobianv","title":"FurnaceImage.fjacobianv","content":"FurnaceImage.fjacobianv \nfjacobianv \nTBD \nThe \u003Ccode\u003Ex\u003C/code\u003E and \u003Ccode\u003Ev\u003C/code\u003E tensors should have the same number of elements."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#flaplacian","title":"FurnaceImage.flaplacian","content":"FurnaceImage.flaplacian \nflaplacian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#flatten","title":"FurnaceImage.flatten","content":"FurnaceImage.flatten \nflatten \nFlattens a contiguous range of dims in a tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#flip","title":"FurnaceImage.flip","content":"FurnaceImage.flip \nflip \nReverse the order of a n-D tensor along given axis in dims"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#floor","title":"FurnaceImage.floor","content":"FurnaceImage.floor \nfloor \nReturns a new tensor with the floor of the elements of input, the largest integer less than or equal to each element. 
\nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#forwardDiff","title":"FurnaceImage.forwardDiff","content":"FurnaceImage.forwardDiff \nforwardDiff \nProduce a new tensor suitable for calculating the forward-mode derivative at the given level tag."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#full","title":"FurnaceImage.full","content":"FurnaceImage.full \nfull \nReturns a new tensor of the given length filled with \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Evalue\u003C/span\u003E, for the given element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#full","title":"FurnaceImage.full","content":"FurnaceImage.full \nfull \nReturns a new tensor filled with the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Evalue\u003C/span\u003E, for the given shape, element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#fullLike","title":"FurnaceImage.fullLike","content":"FurnaceImage.fullLike \nfullLike \nReturns a new tensor filled with the given scalar value with characteristics based on the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#gather","title":"FurnaceImage.gather","content":"FurnaceImage.gather \ngather \nGathers values along an axis specified by dim."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#ge","title":"FurnaceImage.ge","content":"FurnaceImage.ge \nge \nReturns a boolean tensor for the element-wise greater-than-or-equal comparison of the elements in the two tensors. 
\nThe shapes of input and other don\u2019t need to match, but they must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#grad","title":"FurnaceImage.grad","content":"FurnaceImage.grad \ngrad \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#gradhessian","title":"FurnaceImage.gradhessian","content":"FurnaceImage.gradhessian \ngradhessian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#gradhessianv","title":"FurnaceImage.gradhessianv","content":"FurnaceImage.gradhessianv \ngradhessianv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#gradv","title":"FurnaceImage.gradv","content":"FurnaceImage.gradv \ngradv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#gt","title":"FurnaceImage.gt","content":"FurnaceImage.gt \ngt \nReturns a boolean tensor for the element-wise greater-than comparison of the elements in the two tensors. 
\nThe shapes of input and other don\u2019t need to match, but they must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#hasinf","title":"FurnaceImage.hasinf","content":"FurnaceImage.hasinf \nhasinf \nReturns a boolean indicating if any element of the tensor is infinite."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#hasnan","title":"FurnaceImage.hasnan","content":"FurnaceImage.hasnan \nhasnan \nReturns a boolean indicating if any element of the tensor is a not-a-number (NaN) value."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#hessian","title":"FurnaceImage.hessian","content":"FurnaceImage.hessian \nhessian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#hessianv","title":"FurnaceImage.hessianv","content":"FurnaceImage.hessianv \nhessianv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#init","title":"FurnaceImage.init","content":"FurnaceImage.init \ninit \nCreate a new 1D tensor using the given initializer for each element."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#init2d","title":"FurnaceImage.init2d","content":"FurnaceImage.init2d \ninit2d \nCreate a new 2D tensor using the given initializer for each element."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#init3d","title":"FurnaceImage.init3d","content":"FurnaceImage.init3d \ninit3d \nCreate a new 3D tensor using the given initializer for each element."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#init4d","title":"FurnaceImage.init4d","content":"FurnaceImage.init4d \ninit4d \nCreate a new 4D tensor using the given initializer for each 
element."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#isBackendAvailable","title":"FurnaceImage.isBackendAvailable","content":"FurnaceImage.isBackendAvailable \nisBackendAvailable \nIndicates if a given backend is available."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#isCudaAvailable","title":"FurnaceImage.isCudaAvailable","content":"FurnaceImage.isCudaAvailable \nisCudaAvailable \nIndicates if CUDA is available for a given backend."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#isDeviceAvailable","title":"FurnaceImage.isDeviceAvailable","content":"FurnaceImage.isDeviceAvailable \nisDeviceAvailable \nIndicates if a given device is available for a given backend."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#isDeviceTypeAvailable","title":"FurnaceImage.isDeviceTypeAvailable","content":"FurnaceImage.isDeviceTypeAvailable \nisDeviceTypeAvailable \nIndicates if a given device type is available for a given backend."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#isTensor","title":"FurnaceImage.isTensor","content":"FurnaceImage.isTensor \nisTensor \nIndicates if an object is a tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#isinf","title":"FurnaceImage.isinf","content":"FurnaceImage.isinf \nisinf \nReturns a boolean tensor where each element indicates if the corresponding element in the input tensor is an infinity value."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#isnan","title":"FurnaceImage.isnan","content":"FurnaceImage.isnan \nisnan \nReturns a boolean tensor where each element indicates if the corresponding element in the input tensor is a NaN (not-a-number) 
value."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#jacobian","title":"FurnaceImage.jacobian","content":"FurnaceImage.jacobian \njacobian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#jacobianTv","title":"FurnaceImage.jacobianTv","content":"FurnaceImage.jacobianTv \njacobianTv \nTransposed Jacobian-vector product of a vector-to-vector function \u0060f\u0060, at point \u0060x\u0060, along vector \u0060v\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#jacobianv","title":"FurnaceImage.jacobianv","content":"FurnaceImage.jacobianv \njacobianv \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#laplacian","title":"FurnaceImage.laplacian","content":"FurnaceImage.laplacian \nlaplacian \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#le","title":"FurnaceImage.le","content":"FurnaceImage.le \nle \nReturn a boolean tensor for the element-wise less-than-or-equal comparison of the elements in the two tensors. 
\nThe shapes of input and other don\u2019t need to match, but they must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#leakyRelu","title":"FurnaceImage.leakyRelu","content":"FurnaceImage.leakyRelu \nleakyRelu \nApplies the leaky rectified linear unit function element-wise \n\\[\\text{LeakyReLU}(x) = \\max(0, x) \u002B \\text{negative\\_slope} * \\min(0, x)\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#like","title":"FurnaceImage.like","content":"FurnaceImage.like \nlike \nReturns a new tensor based on the given .NET value with characteristics based on the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#linspace","title":"FurnaceImage.linspace","content":"FurnaceImage.linspace \nlinspace \n\n Returns a 1-D tensor of size \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Esteps\u003C/span\u003E whose values are evenly spaced from \u003Cspan class=\u0022fsdocs-param-name\u0022\u003EstartVal\u003C/span\u003E to \u003Cspan class=\u0022fsdocs-param-name\u0022\u003EendVal\u003C/span\u003E. The values are going to be: \\(\n (\\text{startVal},\n \\text{startVal} \u002B \\frac{\\text{endVal} - \\text{startVal}}{\\text{steps} - 1},\n \\ldots,\n \\text{startVal} \u002B (\\text{steps} - 2) * \\frac{\\text{endVal} - \\text{startVal}}{\\text{steps} - 1},\n \\text{endVal}) \n \\)\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#linspace","title":"FurnaceImage.linspace","content":"FurnaceImage.linspace \nlinspace \n\n Returns a 1-D tensor of size \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Esteps\u003C/span\u003E whose values are evenly spaced from \u003Cspan class=\u0022fsdocs-param-name\u0022\u003EstartVal\u003C/span\u003E to \u003Cspan class=\u0022fsdocs-param-name\u0022\u003EendVal\u003C/span\u003E. 
The values are going to be: \\(\n (\\text{startVal},\n \\text{startVal} \u002B \\frac{\\text{endVal} - \\text{startVal}}{\\text{steps} - 1},\n \\ldots,\n \\text{startVal} \u002B (\\text{steps} - 2) * \\frac{\\text{endVal} - \\text{startVal}}{\\text{steps} - 1},\n \\text{endVal}) \n \\)\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#load","title":"FurnaceImage.load","content":"FurnaceImage.load \nload \nLoads an object from the given file using a bespoke binary format. \n\n The format used may change from version to version of Furnace.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#log","title":"FurnaceImage.log","content":"FurnaceImage.log \nlog \nReturns a new tensor with the natural logarithm of the elements of input. \n \\[y_{i} = \\log_{e} (x_{i})\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#log10","title":"FurnaceImage.log10","content":"FurnaceImage.log10 \nlog10 \nReturns a new tensor with the logarithm to the base 10 of the elements of input. \n\\[y_{i} = \\log_{10} (x_{i})\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#logsoftmax","title":"FurnaceImage.logsoftmax","content":"FurnaceImage.logsoftmax \nlogsoftmax \nApplies a softmax followed by a logarithm."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#logspace","title":"FurnaceImage.logspace","content":"FurnaceImage.logspace \nlogspace \n\n Returns a 1-D tensor of size \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Esteps\u003C/span\u003E whose values are evenly spaced logarithmically from \\(\\text{baseVal}^{\\text{startVal}}\\) to \\(\\text{baseVal}^{\\text{endVal}}\\). 
The values are going to be: \\(\n (\\text{baseVal}^{\\text{startVal}},\n \\text{baseVal}^{(\\text{startVal} \u002B \\frac{\\text{endVal} - \\text{startVal}}{ \\text{steps} - 1})},\n \\ldots,\n \\text{baseVal}^{(\\text{startVal} \u002B (\\text{steps} - 2) * \\frac{\\text{endVal} - \\text{startVal}}{ \\text{steps} - 1})},\n \\text{baseVal}^{\\text{endVal}})\n \\)\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#logspace","title":"FurnaceImage.logspace","content":"FurnaceImage.logspace \nlogspace \n\n Returns a 1-D tensor of size \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Esteps\u003C/span\u003E whose values are evenly spaced logarithmically from \\(\\text{baseVal}^{\\text{startVal}}\\) to \\(\\text{baseVal}^{\\text{endVal}}\\). The values are going to be: \\(\n (\\text{baseVal}^{\\text{startVal}},\n \\text{baseVal}^{(\\text{startVal} \u002B \\frac{\\text{endVal} - \\text{startVal}}{ \\text{steps} - 1})},\n \\ldots,\n \\text{baseVal}^{(\\text{startVal} \u002B (\\text{steps} - 2) * \\frac{\\text{endVal} - \\text{startVal}}{ \\text{steps} - 1})},\n \\text{baseVal}^{\\text{endVal}})\n \\)\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#logsumexp","title":"FurnaceImage.logsumexp","content":"FurnaceImage.logsumexp \nlogsumexp \nComputes the logarithm of the sum of exponentials of the elements of the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#lt","title":"FurnaceImage.lt","content":"FurnaceImage.lt \nlt \nReturns a boolean tensor for the element-wise less-than comparison of the elements in the two tensors. 
\nThe shapes of input and other don\u2019t need to match, but they must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#map","title":"FurnaceImage.map","content":"FurnaceImage.map \nmap \nProduce a new tensor by mapping a function over all elements of the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#map2","title":"FurnaceImage.map2","content":"FurnaceImage.map2 \nmap2 \nProduce a new tensor by mapping a function over all corresponding elements of two input tensors. \nThe shapes of the two tensors must be identical."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#map3","title":"FurnaceImage.map3","content":"FurnaceImage.map3 \nmap3 \nProduce a new tensor by mapping a function over all corresponding elements of three input tensors. \nThe shapes of the three tensors must be identical."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#mapi","title":"FurnaceImage.mapi","content":"FurnaceImage.mapi \nmapi \nProduce a new tensor by mapping a function over all elements of the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#mapi2","title":"FurnaceImage.mapi2","content":"FurnaceImage.mapi2 \nmapi2 \nProduce a new tensor by mapping a function over all corresponding elements of two input tensors. \nThe function is passed the index of each element. The shapes of the two tensors must be identical."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#mapi3","title":"FurnaceImage.mapi3","content":"FurnaceImage.mapi3 \nmapi3 \nProduce a new tensor by mapping a function over all corresponding elements of three input tensors. \nThe function is passed the index of each element. 
The shapes of the three tensors must be identical."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#matmul","title":"FurnaceImage.matmul","content":"FurnaceImage.matmul \nmatmul \nMatrix product of two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#max","title":"FurnaceImage.max","content":"FurnaceImage.max \nmax \nReturns the maximum value of all elements in the input tensor along the given dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#max","title":"FurnaceImage.max","content":"FurnaceImage.max \nmax \nEach element of the tensor input is compared with the corresponding element of the tensor other and an element-wise maximum is taken. \nThe shapes of input and other don\u2019t need to match, but they must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#max","title":"FurnaceImage.max","content":"FurnaceImage.max \nmax \nReturns the maximum value of all elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxpool1d","title":"FurnaceImage.maxpool1d","content":"FurnaceImage.maxpool1d \nmaxpool1d \nApplies a 1D max pooling over an input signal composed of several input planes."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxpool1di","title":"FurnaceImage.maxpool1di","content":"FurnaceImage.maxpool1di \nmaxpool1di \nApplies a 1D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxpool2d","title":"FurnaceImage.maxpool2d","content":"FurnaceImage.maxpool2d \nmaxpool2d \nApplies a 2D max pooling over an input signal composed of several input 
planes."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxpool2di","title":"FurnaceImage.maxpool2di","content":"FurnaceImage.maxpool2di \nmaxpool2di \nApplies a 2D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxpool3d","title":"FurnaceImage.maxpool3d","content":"FurnaceImage.maxpool3d \nmaxpool3d \nApplies a 3D max pooling over an input signal composed of several input planes."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxpool3di","title":"FurnaceImage.maxpool3di","content":"FurnaceImage.maxpool3di \nmaxpool3di \nApplies a 3D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxunpool1d","title":"FurnaceImage.maxunpool1d","content":"FurnaceImage.maxunpool1d \nmaxunpool1d \nComputes a partial inverse of maxpool1di"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxunpool2d","title":"FurnaceImage.maxunpool2d","content":"FurnaceImage.maxunpool2d \nmaxunpool2d \nComputes a partial inverse of maxpool2di"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#maxunpool3d","title":"FurnaceImage.maxunpool3d","content":"FurnaceImage.maxunpool3d \nmaxunpool3d \nComputes a partial inverse of maxpool3di"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#mean","title":"FurnaceImage.mean","content":"FurnaceImage.mean \nmean \nReturns the mean value of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. \n\n If keepdim is true, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. 
Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#mean","title":"FurnaceImage.mean","content":"FurnaceImage.mean \nmean \nReturns the mean value of all elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#min","title":"FurnaceImage.min","content":"FurnaceImage.min \nmin \nReturns the minimum value of all elements in the input tensor along the given dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#min","title":"FurnaceImage.min","content":"FurnaceImage.min \nmin \nEach element of the tensor input is compared with the corresponding element of the tensor other and an element-wise minimum is taken. \nThe shapes of input and other don\u2019t need to match, but they must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#min","title":"FurnaceImage.min","content":"FurnaceImage.min \nmin \nReturns the minimum value of all elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#move","title":"FurnaceImage.move","content":"FurnaceImage.move \nmove \nMove the tensor to a different device, backend and/or change its element type. 
\nIf the characteristics are unchanged the input tensor will be returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#mseLoss","title":"FurnaceImage.mseLoss","content":"FurnaceImage.mseLoss \nmseLoss \nCreates a criterion that measures the mean squared error (squared L2 norm) between each element in the input and the target."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#mul","title":"FurnaceImage.mul","content":"FurnaceImage.mul \nmul \nReturn the element-wise multiplication of the two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#multinomial","title":"FurnaceImage.multinomial","content":"FurnaceImage.multinomial \nmultinomial \nReturns a tensor where each row contains numSamples indices sampled from the multinomial probability distribution located in the corresponding row of tensor input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#ne","title":"FurnaceImage.ne","content":"FurnaceImage.ne \nne \nReturns a boolean tensor for the element-wise non-equality comparison of the elements in the two tensors. 
\nThe shapes of input and other don\u2019t need to match, but they must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#neg","title":"FurnaceImage.neg","content":"FurnaceImage.neg \nneg \nReturn the element-wise negation of the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#nelement","title":"FurnaceImage.nelement","content":"FurnaceImage.nelement \nnelement \nReturns the total number of elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#nest","title":"FurnaceImage.nest","content":"FurnaceImage.nest \nnest \nSet the global nesting level for automatic differentiation."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#nest","title":"FurnaceImage.nest","content":"FurnaceImage.nest \nnest \nIncrease the global nesting level for automatic differentiation."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#nestLevel","title":"FurnaceImage.nestLevel","content":"FurnaceImage.nestLevel \nnestLevel \nGet the global nesting level for automatic differentiation."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#nestReset","title":"FurnaceImage.nestReset","content":"FurnaceImage.nestReset \nnestReset \nReset the global nesting level for automatic differentiation to zero."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#nllLoss","title":"FurnaceImage.nllLoss","content":"FurnaceImage.nllLoss \nnllLoss \nThe negative log likelihood loss."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#noDiff","title":"FurnaceImage.noDiff","content":"FurnaceImage.noDiff \nnoDiff \nProduce a new constant (non-differentiated) 
tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#normalize","title":"FurnaceImage.normalize","content":"FurnaceImage.normalize \nnormalize \nNormalizes a vector so all the values are between zero and one (min-max scaling to 0..1)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#one","title":"FurnaceImage.one","content":"FurnaceImage.one \none \nGet the scalar \u00271\u0027 tensor for the given configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#oneLike","title":"FurnaceImage.oneLike","content":"FurnaceImage.oneLike \noneLike \nReturns the \u00271\u0027 scalar tensor with characteristics based on the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#onehot","title":"FurnaceImage.onehot","content":"FurnaceImage.onehot \nonehot \nReturns a one-hot tensor, with one location set to 1, and all others 0."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#onehotLike","title":"FurnaceImage.onehotLike","content":"FurnaceImage.onehotLike \nonehotLike \n\n A version of FurnaceImage.onehot with characteristics based on the input tensor.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#ones","title":"FurnaceImage.ones","content":"FurnaceImage.ones \nones \nReturns a new tensor of the given length filled with \u00271\u0027 values for the given element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#ones","title":"FurnaceImage.ones","content":"FurnaceImage.ones \nones \nReturns a new tensor filled with \u00271\u0027 values for the given shape, element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#onesLike","title":"FurnaceImage.onesLike","content":"FurnaceImage.onesLike \nonesLike \nReturns a new tensor filled with \u00271\u0027 
values with characteristics based on the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#pad","title":"FurnaceImage.pad","content":"FurnaceImage.pad \npad \nAdd zero padding to each side of a tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#permute","title":"FurnaceImage.permute","content":"FurnaceImage.permute \npermute \nReturns the original tensor with its dimensions permuted."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#pow","title":"FurnaceImage.pow","content":"FurnaceImage.pow \npow \nReturn the element-wise exponentiation of the two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#primal","title":"FurnaceImage.primal","content":"FurnaceImage.primal \nprimal \nGet the primal value of the tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#primalDerivative","title":"FurnaceImage.primalDerivative","content":"FurnaceImage.primalDerivative \nprimalDerivative \nGet the primal and derivative values of the tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#rand","title":"FurnaceImage.rand","content":"FurnaceImage.rand \nrand \nReturns a tensor filled with random numbers from a uniform distribution on the interval [0, 1)"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#rand","title":"FurnaceImage.rand","content":"FurnaceImage.rand \nrand \nReturns a tensor filled with random numbers from a uniform distribution on the interval [0, 1)"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#randLike","title":"FurnaceImage.randLike","content":"FurnaceImage.randLike \nrandLike \nReturns a tensor filled with random numbers from a uniform distribution on the interval [0, 1) with characteristics based on the input 
tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#randint","title":"FurnaceImage.randint","content":"FurnaceImage.randint \nrandint \nReturns a tensor filled with random integers generated uniformly between low (inclusive) and high (exclusive)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#randint","title":"FurnaceImage.randint","content":"FurnaceImage.randint \nrandint \nReturns a tensor filled with random integers generated uniformly between low (inclusive) and high (exclusive)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#randintLike","title":"FurnaceImage.randintLike","content":"FurnaceImage.randintLike \nrandintLike \nReturns a tensor with the same shape as Tensor input filled with random integers generated uniformly between low (inclusive) and high (exclusive) with characteristics based on the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#randn","title":"FurnaceImage.randn","content":"FurnaceImage.randn \nrandn \nReturns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#randn","title":"FurnaceImage.randn","content":"FurnaceImage.randn \nrandn \nReturns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#randnLike","title":"FurnaceImage.randnLike","content":"FurnaceImage.randnLike \nrandnLike \nReturns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution) with characteristics based on the input 
tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#relu","title":"FurnaceImage.relu","content":"FurnaceImage.relu \nrelu \nApplies the rectified linear unit function element-wise."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#repeat","title":"FurnaceImage.repeat","content":"FurnaceImage.repeat \nrepeat \nRepeat elements of a tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#reverse","title":"FurnaceImage.reverse","content":"FurnaceImage.reverse \nreverse \nCompute the reverse-mode derivative at the given output tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#reverseDiff","title":"FurnaceImage.reverseDiff","content":"FurnaceImage.reverseDiff \nreverseDiff \nProduce a new tensor suitable for calculating the reverse-mode derivative at the given level tag."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#reversePush","title":"FurnaceImage.reversePush","content":"FurnaceImage.reversePush \nreversePush \nPush the given value as part of the reverse-mode computation at the given output tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#reverseReset","title":"FurnaceImage.reverseReset","content":"FurnaceImage.reverseReset \nreverseReset \nReset the reverse mode computation associated with the given output tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#round","title":"FurnaceImage.round","content":"FurnaceImage.round \nround \nReturns a new tensor with each of the elements of input rounded to the closest integer. 
\nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#safelog","title":"FurnaceImage.safelog","content":"FurnaceImage.safelog \nsafelog \nReturns the logarithm of the tensor after clamping the tensor so that all its elements are greater than epsilon. This is to avoid a -inf result for elements equal to zero."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#save","title":"FurnaceImage.save","content":"FurnaceImage.save \nsave \nSaves the object to the given file using a bespoke binary format. \n\n The format used may change from version to version of Furnace.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#scalar","title":"FurnaceImage.scalar","content":"FurnaceImage.scalar \nscalar \nReturns a new scalar tensor with the value \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Evalue\u003C/span\u003E, for the given element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#scatter","title":"FurnaceImage.scatter","content":"FurnaceImage.scatter \nscatter \nScatters values along an axis specified by dim."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#seed","title":"FurnaceImage.seed","content":"FurnaceImage.seed \nseed \nSeeds all backends with the given random seed, or a new seed based on the current time if no seed is specified."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#sigmoid","title":"FurnaceImage.sigmoid","content":"FurnaceImage.sigmoid \nsigmoid \nApplies the sigmoid element-wise function \n\\[\\text{Sigmoid}(x) = \\frac{1}{1 \u002B \\exp(-x)}\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#sign","title":"FurnaceImage.sign","content":"FurnaceImage.sign \nsign \nReturns a new tensor with the signs of the elements of input. 
\nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#sin","title":"FurnaceImage.sin","content":"FurnaceImage.sin \nsin \nReturns a new tensor with the sine of the elements of input"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#sinh","title":"FurnaceImage.sinh","content":"FurnaceImage.sinh \nsinh \nReturns a new tensor with the hyperbolic sine of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#slice","title":"FurnaceImage.slice","content":"FurnaceImage.slice \nslice \nGet a slice of a tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#softmax","title":"FurnaceImage.softmax","content":"FurnaceImage.softmax \nsoftmax \nApplies a softmax function. \nSoftmax is defined as: \\(\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}\\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#softplus","title":"FurnaceImage.softplus","content":"FurnaceImage.softplus \nsoftplus \nApplies the softplus function element-wise. \n\\[\\text{Softplus}(x) = \\frac{1}{\\beta} * \\log(1 \u002B \\exp(\\beta * x))\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#split","title":"FurnaceImage.split","content":"FurnaceImage.split \nsplit \nSplits the tensor into chunks. 
The tensor will be split into sizes.Length chunks each with a corresponding size in the given dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#sqrt","title":"FurnaceImage.sqrt","content":"FurnaceImage.sqrt \nsqrt \nReturns a new tensor with the square-root of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#squeeze","title":"FurnaceImage.squeeze","content":"FurnaceImage.squeeze \nsqueeze \nReturns a tensor with all the dimensions of input of size 1 removed. \nIf the tensor has a batch dimension of size 1, then squeeze(input) will also remove the batch dimension, which can lead to unexpected errors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#stack","title":"FurnaceImage.stack","content":"FurnaceImage.stack \nstack \nConcatenates sequence of tensors along a new dimension \nAll tensors need to be of the same size."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#standardize","title":"FurnaceImage.standardize","content":"FurnaceImage.standardize \nstandardize \nReturns the tensor after standardization (z-score normalization)"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#std","title":"FurnaceImage.std","content":"FurnaceImage.std \nstd \nReturns the standard deviation of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. \n\n If keepdim is true, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).\n If unbiased is False, then the standard deviation will be calculated via the biased estimator. 
Otherwise, Bessel\u2019s correction will be used.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#std","title":"FurnaceImage.std","content":"FurnaceImage.std \nstd \nReturns the standard deviation of all elements in the input tensor. \n\n If unbiased is False, then the standard deviation will be calculated via the biased estimator. Otherwise, Bessel\u2019s correction will be used.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#sub","title":"FurnaceImage.sub","content":"FurnaceImage.sub \nsub \nReturn the element-wise subtraction of the two tensors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#sum","title":"FurnaceImage.sum","content":"FurnaceImage.sum \nsum \nReturns the sum of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. \n\n If keepdim is true, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. 
Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#sum","title":"FurnaceImage.sum","content":"FurnaceImage.sum \nsum \nReturns the sum of all elements in the input tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#tan","title":"FurnaceImage.tan","content":"FurnaceImage.tan \ntan \nReturns a new tensor with the tangent of the elements of input"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#tanh","title":"FurnaceImage.tanh","content":"FurnaceImage.tanh \ntanh \nReturns a new tensor with the hyperbolic tangent of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#tensor","title":"FurnaceImage.tensor","content":"FurnaceImage.tensor \ntensor \n\n Creates a new tensor from the given data, using the given element type and configuration.\n \n\n The data is converted from arrays, sequences, lists and tuples of primitive values to a tensor whose shape is inferred from the data.\n The fastest creation technique is a one dimensional array matching the desired dtype. 
Then use \u0027view\u0027 to reshape."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#toImage","title":"FurnaceImage.toImage","content":"FurnaceImage.toImage \ntoImage \nConvert tensor to an image tensor with shape Channels x Height x Width \nIf the input tensor has 4 dimensions, then make a single image grid."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#toImageString","title":"FurnaceImage.toImageString","content":"FurnaceImage.toImageString \ntoImageString \nConvert tensor to a grayscale image tensor and return a string representation approximating grayscale values"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#trace","title":"FurnaceImage.trace","content":"FurnaceImage.trace \ntrace \nReturns the sum of the elements of the diagonal of the input 2-D matrix"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#transpose","title":"FurnaceImage.transpose","content":"FurnaceImage.transpose \ntranspose \nReturns a tensor that is a transposed version of input with dimensions 0 and 1 swapped."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#transpose","title":"FurnaceImage.transpose","content":"FurnaceImage.transpose \ntranspose \nReturns a tensor that is a transposed version of input. 
The given dimensions dim0 and dim1 are swapped."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#undilate","title":"FurnaceImage.undilate","content":"FurnaceImage.undilate \nundilate \nReverse the dilation of the tensor in using the given dilations in each corresponding dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#unflatten","title":"FurnaceImage.unflatten","content":"FurnaceImage.unflatten \nunflatten \nUnflattens a tensor dimension by expanding it to the given shape."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#unsqueeze","title":"FurnaceImage.unsqueeze","content":"FurnaceImage.unsqueeze \nunsqueeze \nReturns a new tensor with a dimension of size one inserted at the specified position"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#unsqueezeAs","title":"FurnaceImage.unsqueezeAs","content":"FurnaceImage.unsqueezeAs \nunsqueezeAs \nReturns a new tensor with dimensions of size one appended to the end until the number of dimensions is the same as the other tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#unstack","title":"FurnaceImage.unstack","content":"FurnaceImage.unstack \nunstack \nRemoves a tensor dimension"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#var","title":"FurnaceImage.var","content":"FurnaceImage.var \nvar \nReturns the variance of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. \n\n If keepdim is true, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).\n If unbiased is False, then the variance will be calculated via the biased estimator. 
Otherwise, Bessel\u2019s correction will be used.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#var","title":"FurnaceImage.var","content":"FurnaceImage.var \nvar \nReturns the variance of all elements in the input tensor. \n\n If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel\u2019s correction will be used.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#view","title":"FurnaceImage.view","content":"FurnaceImage.view \nview \nReturns a new tensor with the same data as the self tensor but of a different shape. \nThe returned tensor shares the same data and must have the same number of elements, but may have a different size. For a tensor to be viewed, the new view size must be compatible with its original size.\n The returned tensor shares the same data and must have the same number of elements, but may have a different size. \n For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension,\n or only span across original dimensions \\(d, d\u002B1, \\dots, d\u002Bkd,d\u002B1,\u2026,d\u002Bk\\) that satisfy the following contiguity-like condition that\n \\(\\forall i = d, \\dots, d\u002Bk-1\u2200i=d,\u2026,d\u002Bk\u22121 ,\\) \\[\\text{stride}[i] = \\text{stride}[i\u002B1] \\times \\text{size}[i\u002B1]\\]\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#view","title":"FurnaceImage.view","content":"FurnaceImage.view \nview \nReturns a new tensor with the same data as the self tensor but of a different shape. \nThe returned tensor shares the same data and must have the same number of elements, but may have a different size. 
For a tensor to be viewed, the new view size must be compatible with its original size.\n The returned tensor shares the same data and must have the same number of elements, but may have a different size. \n For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension,\n or only span across original dimensions \\(d, d\u002B1, \\dots, d\u002Bkd,d\u002B1,\u2026,d\u002Bk\\) that satisfy the following contiguity-like condition that\n \\(\\forall i = d, \\dots, d\u002Bk-1\u2200i=d,\u2026,d\u002Bk\u22121 ,\\) \\[\\text{stride}[i] = \\text{stride}[i\u002B1] \\times \\text{size}[i\u002B1]\\]\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#viewAs","title":"FurnaceImage.viewAs","content":"FurnaceImage.viewAs \nviewAs \nView this tensor as the same size as other. \nThe returned tensor shares the same data and must have the same number of elements, but may have a different size. For a tensor to be viewed, the new view size must be compatible with its original size.\n The returned tensor shares the same data and must have the same number of elements, but may have a different size. 
\n For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension,\n or only span across original dimensions \\(d, d\u002B1, \\dots, d\u002Bkd,d\u002B1,\u2026,d\u002Bk\\) that satisfy the following contiguity-like condition that\n \\(\\forall i = d, \\dots, d\u002Bk-1\u2200i=d,\u2026,d\u002Bk\u22121 ,\\) \\[\\text{stride}[i] = \\text{stride}[i\u002B1] \\times \\text{size}[i\u002B1]\\]\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#zero","title":"FurnaceImage.zero","content":"FurnaceImage.zero \nzero \nGet the scalar zero tensor for the given configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#zeroCreate","title":"FurnaceImage.zeroCreate","content":"FurnaceImage.zeroCreate \nzeroCreate \nCreate a new 1D tensor using \u00270\u0027 as value for each element."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#zeroLike","title":"FurnaceImage.zeroLike","content":"FurnaceImage.zeroLike \nzeroLike \nReturns the \u00270\u0027 scalar tensor with characteristics based on the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#zeros","title":"FurnaceImage.zeros","content":"FurnaceImage.zeros \nzeros \nReturns a new tensor filled with \u00270\u0027 values for the given length, element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#zeros","title":"FurnaceImage.zeros","content":"FurnaceImage.zeros \nzeros \nReturns a new tensor filled with \u00270\u0027 values for the given shape, element type and configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#zerosLike","title":"FurnaceImage.zerosLike","content":"FurnaceImage.zerosLike \nzerosLike \nReturns a new tensor filled with \u00270\u0027 values with 
characteristics based on the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#version","title":"FurnaceImage.version","content":"FurnaceImage.version \nversion \nReturns the version of the Furnace.Core assembly."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printer.html","title":"Printer","content":"Printer \n \nPrinter.threshold \nthreshold \nPrinter.precision \nprecision \nPrinter.edgeItems \nedgeItems \nPrinter.Default \nDefault \nPrinter.Short \nShort \nPrinter.Full \nFull \nPrinter.Custom \nCustom"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printer.html#threshold","title":"Printer.threshold","content":"Printer.threshold \nthreshold \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printer.html#precision","title":"Printer.precision","content":"Printer.precision \nprecision \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printer.html#edgeItems","title":"Printer.edgeItems","content":"Printer.edgeItems \nedgeItems \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printer.html#Default","title":"Printer.Default","content":"Printer.Default \nDefault \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printer.html#Short","title":"Printer.Short","content":"Printer.Short \nShort \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printer.html#Full","title":"Printer.Full","content":"Printer.Full \nFull \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-printer.html#Custom","title":"Printer.Custom","content":"Printer.Custom \nCustom \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-shape.html","title":"Shape","content":"Shape \n\n Represents the shape of a tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html","title":"Tensor","content":"Tensor \n\n Represents a multi-dimensional data type containing elements of a single data 
type.\n \nTensor.GetSlice \nGetSlice \nTensor.abs \nabs \nTensor.acos \nacos \nTensor.add \nadd \nTensor.add \nadd \nTensor.addSlice \naddSlice \nTensor.allclose \nallclose \nTensor.ancestors \nancestors \nTensor.arangeLike \narangeLike \nTensor.arangeLike \narangeLike \nTensor.argmax \nargmax \nTensor.argmax \nargmax \nTensor.argmin \nargmin \nTensor.argmin \nargmin \nTensor.asin \nasin \nTensor.atan \natan \nTensor.backward \nbackward \nTensor.bceLoss \nbceLoss \nTensor.bernoulli \nbernoulli \nTensor.bfloat16 \nbfloat16 \nTensor.bool \nbool \nTensor.byte \nbyte \nTensor.cast \ncast \nTensor.cast \ncast \nTensor.ceil \nceil \nTensor.clamp \nclamp \nTensor.clone \nclone \nTensor.conv1d \nconv1d \nTensor.conv2d \nconv2d \nTensor.conv3d \nconv3d \nTensor.convTranspose1d \nconvTranspose1d \nTensor.convTranspose2d \nconvTranspose2d \nTensor.convTranspose3d \nconvTranspose3d \nTensor.corrcoef \ncorrcoef \nTensor.cos \ncos \nTensor.cosh \ncosh \nTensor.cov \ncov \nTensor.cpu \ncpu \nTensor.crossEntropyLoss \ncrossEntropyLoss \nTensor.diagonal \ndiagonal \nTensor.dilate \ndilate \nTensor.div \ndiv \nTensor.div \ndiv \nTensor.dot \ndot \nTensor.double \ndouble \nTensor.dropout \ndropout \nTensor.dropout2d \ndropout2d \nTensor.dropout3d \ndropout3d \nTensor.eq \neq \nTensor.exp \nexp \nTensor.expand \nexpand \nTensor.expandAs \nexpandAs \nTensor.flatten \nflatten \nTensor.flip \nflip \nTensor.float \nfloat \nTensor.float16 \nfloat16 \nTensor.float32 \nfloat32 \nTensor.float64 \nfloat64 \nTensor.floor \nfloor \nTensor.forwardDiff \nforwardDiff \nTensor.fullLike \nfullLike \nTensor.gather \ngather \nTensor.ge \nge \nTensor.gpu \ngpu \nTensor.gt \ngt \nTensor.hasinf \nhasinf \nTensor.hasinfnan \nhasinfnan \nTensor.hasnan \nhasnan \nTensor.int \nint \nTensor.int16 \nint16 \nTensor.int32 \nint32 \nTensor.int64 \nint64 \nTensor.int8 \nint8 \nTensor.isSameDiffType \nisSameDiffType \nTensor.isinf \nisinf \nTensor.isnan \nisnan \nTensor.le \nle \nTensor.leakyRelu \nleakyRelu 
\nTensor.like \nlike \nTensor.linspaceLike \nlinspaceLike \nTensor.linspaceLike \nlinspaceLike \nTensor.log \nlog \nTensor.log10 \nlog10 \nTensor.logsoftmax \nlogsoftmax \nTensor.logspaceLike \nlogspaceLike \nTensor.logspaceLike \nlogspaceLike \nTensor.logsumexp \nlogsumexp \nTensor.lt \nlt \nTensor.matmul \nmatmul \nTensor.max \nmax \nTensor.max \nmax \nTensor.max \nmax \nTensor.maxpool1d \nmaxpool1d \nTensor.maxpool1di \nmaxpool1di \nTensor.maxpool2d \nmaxpool2d \nTensor.maxpool2di \nmaxpool2di \nTensor.maxpool3d \nmaxpool3d \nTensor.maxpool3di \nmaxpool3di \nTensor.maxunpool1d \nmaxunpool1d \nTensor.maxunpool2d \nmaxunpool2d \nTensor.maxunpool3d \nmaxunpool3d \nTensor.mean \nmean \nTensor.mean \nmean \nTensor.min \nmin \nTensor.min \nmin \nTensor.min \nmin \nTensor.move \nmove \nTensor.move \nmove \nTensor.move \nmove \nTensor.mseLoss \nmseLoss \nTensor.mul \nmul \nTensor.mul \nmul \nTensor.multinomial \nmultinomial \nTensor.ne \nne \nTensor.neg \nneg \nTensor.nllLoss \nnllLoss \nTensor.noDiff \nnoDiff \nTensor.normalize \nnormalize \nTensor.oneLike \noneLike \nTensor.onehotLike \nonehotLike \nTensor.onesLike \nonesLike \nTensor.pad \npad \nTensor.permute \npermute \nTensor.pow \npow \nTensor.pow \npow \nTensor.randLike \nrandLike \nTensor.randintLike \nrandintLike \nTensor.randnLike \nrandnLike \nTensor.relu \nrelu \nTensor.repeat \nrepeat \nTensor.reverse \nreverse \nTensor.reverseDiff \nreverseDiff \nTensor.reversePush \nreversePush \nTensor.reverseReset \nreverseReset \nTensor.round \nround \nTensor.safelog \nsafelog \nTensor.save \nsave \nTensor.scalarLike \nscalarLike \nTensor.scatter \nscatter \nTensor.sigmoid \nsigmoid \nTensor.sign \nsign \nTensor.sin \nsin \nTensor.sinh \nsinh \nTensor.softmax \nsoftmax \nTensor.softplus \nsoftplus \nTensor.split \nsplit \nTensor.sqrt \nsqrt \nTensor.squeeze \nsqueeze \nTensor.standardize \nstandardize \nTensor.std \nstd \nTensor.std \nstd \nTensor.sub \nsub \nTensor.sub \nsub \nTensor.sum \nsum \nTensor.sum \nsum 
\nTensor.sumToSize \nsumToSize \nTensor.summary \nsummary \nTensor.tan \ntan \nTensor.tanh \ntanh \nTensor.toArray \ntoArray \nTensor.toArray1D \ntoArray1D \nTensor.toArray2D \ntoArray2D \nTensor.toArray3D \ntoArray3D \nTensor.toArray4D \ntoArray4D \nTensor.toArray5D \ntoArray5D \nTensor.toArray6D \ntoArray6D \nTensor.toBool \ntoBool \nTensor.toByte \ntoByte \nTensor.toDouble \ntoDouble \nTensor.toImage \ntoImage \nTensor.toImageString \ntoImageString \nTensor.toInt16 \ntoInt16 \nTensor.toInt32 \ntoInt32 \nTensor.toInt64 \ntoInt64 \nTensor.toSByte \ntoSByte \nTensor.toScalar \ntoScalar \nTensor.toSingle \ntoSingle \nTensor.trace \ntrace \nTensor.transpose \ntranspose \nTensor.transpose \ntranspose \nTensor.undilate \nundilate \nTensor.unflatten \nunflatten \nTensor.unsqueeze \nunsqueeze \nTensor.unsqueezeAs \nunsqueezeAs \nTensor.unstack \nunstack \nTensor.var \nvar \nTensor.var \nvar \nTensor.view \nview \nTensor.view \nview \nTensor.viewAs \nviewAs \nTensor.zeroLike \nzeroLike \nTensor.zerosLike \nzerosLike \nTensor.backend \nbackend \nTensor.primalDeep \nprimalDeep \nTensor.elementSize \nelementSize \nTensor.isNoDiff \nisNoDiff \nTensor.nestingTag \nnestingTag \nTensor.Item \nItem \nTensor.deviceType \ndeviceType \nTensor.isForwardDiff \nisForwardDiff \nTensor.depth \ndepth \nTensor.fanout \nfanout \nTensor.device \ndevice \nTensor.nelement \nnelement \nTensor.memorySize \nmemorySize \nTensor.primal \nprimal \nTensor.derivative \nderivative \nTensor.isReverseDiff \nisReverseDiff \nTensor.shape \nshape \nTensor.dim \ndim \nTensor.parentOp \nparentOp \nTensor.dtype \ndtype \nTensor.derivativeDeep \nderivativeDeep \nTensor.primalRaw \nprimalRaw \nTensor.Abs \nAbs \nTensor.Acos \nAcos \nTensor.Asin \nAsin \nTensor.Atan \nAtan \nTensor.Ceiling \nCeiling \nTensor.Cos \nCos \nTensor.Cosh \nCosh \nTensor.Exp \nExp \nTensor.Floor \nFloor \nTensor.Log \nLog \nTensor.Log10 \nLog10 \nTensor.Op \nOp \nTensor.Op \nOp \nTensor.Pow \nPow \nTensor.Pow \nPow \nTensor.Pow \nPow 
\nTensor.Pow \nPow \nTensor.Pow \nPow \nTensor.Pow \nPow \nTensor.Pow \nPow \nTensor.Round \nRound \nTensor.Sin \nSin \nTensor.Sinh \nSinh \nTensor.Sqrt \nSqrt \nTensor.Tan \nTan \nTensor.Tanh \nTanh \nTensor.cat \ncat \nTensor.create \ncreate \nTensor.eye \neye \nTensor.load \nload \nTensor.ofRawTensor \nofRawTensor \nTensor.(\u002B) \n(\u002B) \nTensor.(\u002B) \n(\u002B) \nTensor.(\u002B) \n(\u002B) \nTensor.(/) \n(/) \nTensor.(/) \n(/) \nTensor.(/) \n(/) \nTensor.op_Explicit \nop_Explicit \nTensor.op_Explicit \nop_Explicit \nTensor.op_Explicit \nop_Explicit \nTensor.op_Explicit \nop_Explicit \nTensor.op_Explicit \nop_Explicit \nTensor.op_Explicit \nop_Explicit \nTensor.op_Explicit \nop_Explicit \nTensor.op_Explicit \nop_Explicit \nTensor.(--\u003E) \n(--\u003E) \nTensor.(*) \n(*) \nTensor.(*) \n(*) \nTensor.(*) \n(*) \nTensor.(-) \n(-) \nTensor.(-) \n(-) \nTensor.(-) \n(-) \nTensor.(~-) \n(~-) \nTensor.stack \nstack \nTensor.Zero \nZero \nTensor.One \nOne"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#GetSlice","title":"Tensor.GetSlice","content":"Tensor.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#abs","title":"Tensor.abs","content":"Tensor.abs \nabs \nComputes the element-wise absolute value of the given input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#acos","title":"Tensor.acos","content":"Tensor.acos \nacos \nReturns a new tensor with the arccosine of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#add","title":"Tensor.add","content":"Tensor.add \nadd \nEach element of the object tensor is added to the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. 
The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#add","title":"Tensor.add","content":"Tensor.add \nadd \nEach element of the object tensor is added to each corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#addSlice","title":"Tensor.addSlice","content":"Tensor.addSlice \naddSlice \nAdd the given tensor as a slice at the given location."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#allclose","title":"Tensor.allclose","content":"Tensor.allclose \nallclose \n\n Indicates if two tensors have the same shape and all corresponding elements are equal within the\n given tolerances.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#ancestors","title":"Tensor.ancestors","content":"Tensor.ancestors \nancestors \n\n A debugging routine that returns the ancestors of a tensor involved in reverse-mode automatic differentiation\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#arangeLike","title":"Tensor.arangeLike","content":"Tensor.arangeLike \narangeLike \n\n Returns a tensor in the manner of \u003Csee cref=\u0022M:Furnace.FurnaceImage.arange\u0022/\u003E for the given element type and configuration, defaulting to\n the element type and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#arangeLike","title":"Tensor.arangeLike","content":"Tensor.arangeLike \narangeLike \n\n Returns a tensor in the manner of \u003Csee cref=\u0022M:Furnace.FurnaceImage.arange\u0022/\u003E for the given element type and configuration, defaulting to\n the element type and configuration of the input 
tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#argmax","title":"Tensor.argmax","content":"Tensor.argmax \nargmax \nReturns the indexes of maximum values of the primal of the tensor, reducing the given dimension. \nThe resulting tensor does not participate in reverse or forward differentiation. It can be used as input to another operation such as \u003Ccode\u003EFurnaceImage.gather\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#argmax","title":"Tensor.argmax","content":"Tensor.argmax \nargmax \n\n Gets the index of a maximum value in the tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#argmin","title":"Tensor.argmin","content":"Tensor.argmin \nargmin \nReturns the indexes of minimum values of the primal of the tensor, reducing the given dimension. \nThe resulting tensor does not participate in reverse or forward differentiation. It can be used as input to another operation such as \u003Ccode\u003EFurnaceImage.gather\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#argmin","title":"Tensor.argmin","content":"Tensor.argmin \nargmin \n\n Gets the index of a minimum value in the tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#asin","title":"Tensor.asin","content":"Tensor.asin \nasin \nReturns a new tensor with the arcsine of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#atan","title":"Tensor.atan","content":"Tensor.atan \natan \nReturns a new tensor with the arctangent of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#backward","title":"Tensor.backward","content":"Tensor.backward \nbackward \nSee 
\u003Ccode\u003Ereverse\u003C/code\u003E"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#bceLoss","title":"Tensor.bceLoss","content":"Tensor.bceLoss \nbceLoss \nCreates a criterion that measures the Binary Cross Entropy between the target and the output"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#bernoulli","title":"Tensor.bernoulli","content":"Tensor.bernoulli \nbernoulli \nDraws binary random numbers (0 or 1) from a Bernoulli distribution"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#bfloat16","title":"Tensor.bfloat16","content":"Tensor.bfloat16 \nbfloat16 \n\n Returns a new tensor with each element converted to type bfloat16\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#bool","title":"Tensor.bool","content":"Tensor.bool \nbool \n\n Returns a new tensor with each element converted to type bool\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#byte","title":"Tensor.byte","content":"Tensor.byte \nbyte \n\n Returns a new tensor with each element converted to type float64\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#cast","title":"Tensor.cast","content":"Tensor.cast \ncast \n\n Converts the tensor to a new tensor with the given system type\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#cast","title":"Tensor.cast","content":"Tensor.cast \ncast \n\n Converts the tensor to a new tensor with the given \u003Csee cref=\u0022T:Furnace.Dtype\u0022/\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#ceil","title":"Tensor.ceil","content":"Tensor.ceil \nceil \nReturns a new tensor with the ceil of the elements of input, the smallest integer greater than or equal to each element. 
\nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#clamp","title":"Tensor.clamp","content":"Tensor.clamp \nclamp \nClamp all elements in input into the range [ low..high] and return a resulting tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#clone","title":"Tensor.clone","content":"Tensor.clone \nclone \nReturns a new tensor with underlying storage copied. \n\n This method discards differentiability and returns a constant tensor.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#conv1d","title":"Tensor.conv1d","content":"Tensor.conv1d \nconv1d \nApplies a 1D convolution over an input signal composed of several input planes"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#conv2d","title":"Tensor.conv2d","content":"Tensor.conv2d \nconv2d \nApplies a 2D convolution over an input signal composed of several input planes"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#conv3d","title":"Tensor.conv3d","content":"Tensor.conv3d \nconv3d \nApplies a 3D convolution over an input signal composed of several input planes"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#convTranspose1d","title":"Tensor.convTranspose1d","content":"Tensor.convTranspose1d \nconvTranspose1d \nApplies a 1D transposed convolution operator over an input signal composed of several input planes, sometimes also called \u0027deconvolution\u0027."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#convTranspose2d","title":"Tensor.convTranspose2d","content":"Tensor.convTranspose2d \nconvTranspose2d \nApplies a 2D transposed convolution operator over an input signal composed of several input planes, sometimes also called 
\u0027deconvolution\u0027."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#convTranspose3d","title":"Tensor.convTranspose3d","content":"Tensor.convTranspose3d \nconvTranspose3d \nApplies a 3D transposed convolution operator over an input signal composed of several input planes, sometimes also called \u0027deconvolution\u0027."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#corrcoef","title":"Tensor.corrcoef","content":"Tensor.corrcoef \ncorrcoef \n\n Estimates the Pearson correlation coefficient matrix for the given tensor. The tensor\u0027s first\n dimension should index variables and the second dimension should\n index observations for each variable.\n \n\n The correlation between variables \\(x\\) and \\(y\\) is\n \\[cor(x,y)= \\frac{\\sum^{N}_{i = 1}(x_{i} - \\mu_x)(y_{i} - \\mu_y)}{\\sigma_x \\sigma_y (N ~-~1)}\\]\n where \\(\\mu_x\\) and \\(\\mu_y\\) are the sample means and \\(\\sigma_x\\) and \\(\\sigma_x\\) are \n the sample standard deviations.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#cos","title":"Tensor.cos","content":"Tensor.cos \ncos \nReturns a new tensor with the cosine of the elements of input"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#cosh","title":"Tensor.cosh","content":"Tensor.cosh \ncosh \nReturns a new tensor with the hyperbolic cosine of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#cov","title":"Tensor.cov","content":"Tensor.cov \ncov \n\n Estimates the covariance matrix of the given tensor. 
The tensor\u0027s first\n dimension should index variables and the second dimension should\n index observations for each variable.\n \n\n If no weights are given, the covariance between variables \\(x\\) and \\(y\\) is\n \\[cov(x,y)= \\frac{\\sum^{N}_{i = 1}(x_{i} - \\mu_x)(y_{i} - \\mu_y)}{N~-~\\text{correction}}\\]\n where \\(\\mu_x\\) and \\(\\mu_y\\) are the sample means.\n \n If there are fweights or aweights then the covariance is\n \\[cov(x,y)=\\frac{\\sum^{N}_{i = 1}w_i(x_{i} - \\mu_x^*)(y_{i} - \\mu_y^*)}{\\text{normalization factor}}\\]\n where \\(w\\) is either fweights or aweights if one weight type is provided.\n If both weight types are provided \\(w=\\text{fweights}\\times\\text{aweights}\\). \n \\(\\mu_x^* = \\frac{\\sum^{N}_{i = 1}w_ix_{i} }{\\sum^{N}_{i = 1}w_i}\\)\n is the weighted mean of variables.\n The normalization factor is \\(\\sum^{N}_{i=1} w_i\\) if only fweights are provided or if aweights are provided and \u003Ccode\u003Ecorrection=0\u003C/code\u003E. \n Otherwise if aweights \\(aw\\) are provided the normalization factor is\n \\(\\sum^N_{i=1} w_i - \\text{correction}\\times\\frac{\\sum^N_{i=1} w_i aw_i}{\\sum^N_{i=1} w_i}\\) \n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#cpu","title":"Tensor.cpu","content":"Tensor.cpu \ncpu \n\n Returns a new tensor with the same contents moved to the CPU\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#crossEntropyLoss","title":"Tensor.crossEntropyLoss","content":"Tensor.crossEntropyLoss \ncrossEntropyLoss \nThis criterion combines logsoftmax and nllLoss in a single function"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#diagonal","title":"Tensor.diagonal","content":"Tensor.diagonal \ndiagonal \n\n Returns a tensor with the diagonal elements with respect to \u003Ccode\u003Edim1\u003C/code\u003E and \u003Ccode\u003Edim2\u003C/code\u003E.\n The argument offset controls which diagonal to consider.\n 
"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#dilate","title":"Tensor.dilate","content":"Tensor.dilate \ndilate \nDilate the tensor in using the given dilations in each corresponding dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#div","title":"Tensor.div","content":"Tensor.div \ndiv \nDivides each element of the object tensor by the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#div","title":"Tensor.div","content":"Tensor.div \ndiv \nDivides each element of the object tensor by the corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#dot","title":"Tensor.dot","content":"Tensor.dot \ndot \nComputes the dot product (inner product) of two vector (1d-tensors). \nThis function does not broadcast and expects this tensor to be a vector (1d-tensor). 
\n The tensors must have the same number of elements.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#double","title":"Tensor.double","content":"Tensor.double \ndouble \n\n Returns a new tensor with each element converted to type float64\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#dropout","title":"Tensor.dropout","content":"Tensor.dropout \ndropout \nRandomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#dropout2d","title":"Tensor.dropout2d","content":"Tensor.dropout2d \ndropout2d \nRandomly zero out entire channels (a channel is a 2D feature map, e.g., the jj -th channel of the ii -th sample in the batched input is a 2D tensor \\text{input}[i, j]input[i,j] ). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#dropout3d","title":"Tensor.dropout3d","content":"Tensor.dropout3d \ndropout3d \nRandomly zero out entire channels (a channel is a 3D feature map, e.g., the jj -th channel of the ii -th sample in the batched input is a 3D tensor \\text{input}[i, j]input[i,j] ). 
Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#eq","title":"Tensor.eq","content":"Tensor.eq \neq \nComputes element-wise \\(a = b\\), returning a boolean tensor containing a \u003Ccode\u003Etrue\u003C/code\u003E at each location where the comparison is true"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#exp","title":"Tensor.exp","content":"Tensor.exp \nexp \nApplies the exp function element-wise."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#expand","title":"Tensor.expand","content":"Tensor.expand \nexpand \nReturns a new view of the object tensor with singleton dimensions expanded to a larger size. \n\u003Cp class=\u0027fsdocs-para\u0027\u003EPassing -1 as the size for a dimension means not changing the size of that dimension.\u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003EThe tensor can be also expanded to a larger number of dimensions, and the new ones will be appended \n at the front. For the new dimensions, the size cannot be set to -1.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n Expanding a tensor does not allocate new memory, but only creates a new view on the existing tensor\n where a dimension of size one is expanded to a larger size by setting the stride to 0. 
Any dimension\n of size 1 can be expanded to an arbitrary value without allocating new memory.\n \u003C/p\u003E"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#expandAs","title":"Tensor.expandAs","content":"Tensor.expandAs \nexpandAs \nExpand this tensor to the same size as the other."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#flatten","title":"Tensor.flatten","content":"Tensor.flatten \nflatten \nFlattens a contiguous range of dims in a tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#flip","title":"Tensor.flip","content":"Tensor.flip \nflip \nReverse the order of a n-D tensor along given axis in dims"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#float","title":"Tensor.float","content":"Tensor.float \nfloat \n\n Returns a new tensor with each element converted to type float64\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#float16","title":"Tensor.float16","content":"Tensor.float16 \nfloat16 \n\n Returns a new tensor with each element converted to type float16\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#float32","title":"Tensor.float32","content":"Tensor.float32 \nfloat32 \n\n Returns a new tensor with each element converted to type float32\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#float64","title":"Tensor.float64","content":"Tensor.float64 \nfloat64 \n\n Returns a new tensor with each element converted to type float64\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#floor","title":"Tensor.floor","content":"Tensor.floor \nfloor \nReturns a new tensor with the floor of the elements of input, the largest integer less than or equal to each element. 
\nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#forwardDiff","title":"Tensor.forwardDiff","content":"Tensor.forwardDiff \nforwardDiff \n\n Returns the input tensor with added support for forward-mode automatic differentiation.\n \n\n Any tensors produced using this tensor will have attached derivatives for forward mode propagation.\n The current global nesting level is used for nested differentiation.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#fullLike","title":"Tensor.fullLike","content":"Tensor.fullLike \nfullLike \n\n Returns a new tensor filled with the given scalar value for the given shape, element type and configuration, defaulting to the \n shape and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#gather","title":"Tensor.gather","content":"Tensor.gather \ngather \nGathers values along an axis specified by dim."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#ge","title":"Tensor.ge","content":"Tensor.ge \nge \nComputes element-wise \\(a \\geq b\\), returning a boolean tensor containing a \u003Ccode\u003Etrue\u003C/code\u003E at each location where the comparison is true"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#gpu","title":"Tensor.gpu","content":"Tensor.gpu \ngpu \n\n Returns a new tensor with the same contents moved to the primary GPU device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#gt","title":"Tensor.gt","content":"Tensor.gt \ngt \nComputes element-wise \\(a \u003E b\\), returning a boolean tensor containing a \u003Ccode\u003Etrue\u003C/code\u003E at each location where the comparison is true"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#hasinf","title":"Tensor.hasinf","content":"Tensor.hasinf \nhasinf \n\n Gets if any value in the 
tensor is \u002B/- INF.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#hasinfnan","title":"Tensor.hasinfnan","content":"Tensor.hasinfnan \nhasinfnan \n\n Gets if any value in the tensor is NaN or \u002B/- INF.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#hasnan","title":"Tensor.hasnan","content":"Tensor.hasnan \nhasnan \n\n Gets if any value in the tensor is NaN.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#int","title":"Tensor.int","content":"Tensor.int \nint \n\n Returns a new tensor with each element converted to type int32\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#int16","title":"Tensor.int16","content":"Tensor.int16 \nint16 \n\n Returns a new tensor with each element converted to type int16\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#int32","title":"Tensor.int32","content":"Tensor.int32 \nint32 \n\n Returns a new tensor with each element converted to type int32\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#int64","title":"Tensor.int64","content":"Tensor.int64 \nint64 \n\n Returns a new tensor with each element converted to type int64\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#int8","title":"Tensor.int8","content":"Tensor.int8 \nint8 \n\n Returns a new tensor with each element converted to type int8\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#isSameDiffType","title":"Tensor.isSameDiffType","content":"Tensor.isSameDiffType \nisSameDiffType \n\n Indicates if two tensors have the same differentiation type\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#isinf","title":"Tensor.isinf","content":"Tensor.isinf \nisinf \nReturns a new tensor with boolean elements representing if each element is \u002B/-INF or 
not."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#isnan","title":"Tensor.isnan","content":"Tensor.isnan \nisnan \nReturns a new tensor with boolean elements representing if each element is NaN or not. Complex values are considered NaN when either their real and/or imaginary part is NaN."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#le","title":"Tensor.le","content":"Tensor.le \nle \nComputes element-wise \\(a \\leq b\\), returning a boolean tensor containing a \u003Ccode\u003Etrue\u003C/code\u003E at each location where the comparison is true"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#leakyRelu","title":"Tensor.leakyRelu","content":"Tensor.leakyRelu \nleakyRelu \nApplies the leaky rectified linear unit function element-wise \n\\[\\text{leakyRelu}(x) = \\max(0, x) \u002B \\text{negativeSlope} * \\min(0, x)\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#like","title":"Tensor.like","content":"Tensor.like \nlike \n\n Returns a tensor from the .NET data in \u003Ccode\u003Evalue\u003C/code\u003E for the given element type and configuration, defaulting to\n the element type and configuration of the input tensor.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#linspaceLike","title":"Tensor.linspaceLike","content":"Tensor.linspaceLike \nlinspaceLike \n\n Returns a tensor in the manner of \u003Csee cref=\u0022M:Furnace.FurnaceImage.linspace\u0022/\u003E for the given element type and configuration, defaulting to\n the element type and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#linspaceLike","title":"Tensor.linspaceLike","content":"Tensor.linspaceLike \nlinspaceLike \n\n Returns a tensor in the manner of \u003Csee cref=\u0022M:Furnace.FurnaceImage.linspace\u0022/\u003E for the given element type and configuration, defaulting to\n the element type 
and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#log","title":"Tensor.log","content":"Tensor.log \nlog \nReturns a new tensor with the natural logarithm of the elements of input. \n \\[y_{i} = \\log_{e} (x_{i})\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#log10","title":"Tensor.log10","content":"Tensor.log10 \nlog10 \nReturns a new tensor with the logarithm to the base 10 of the elements of input. \n\\[y_{i} = \\log_{10} (x_{i})\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#logsoftmax","title":"Tensor.logsoftmax","content":"Tensor.logsoftmax \nlogsoftmax \nApplies a softmax followed by a logarithm."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#logspaceLike","title":"Tensor.logspaceLike","content":"Tensor.logspaceLike \nlogspaceLike \n\n Returns a tensor in the manner of \u003Csee cref=\u0022M:Furnace.FurnaceImage.logspace\u0022/\u003E for the given element type and configuration, defaulting to\n the element type and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#logspaceLike","title":"Tensor.logspaceLike","content":"Tensor.logspaceLike \nlogspaceLike \n\n Returns a tensor in the manner of \u003Csee cref=\u0022M:Furnace.FurnaceImage.logspace\u0022/\u003E for the given element type and configuration, defaulting to\n the element type and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#logsumexp","title":"Tensor.logsumexp","content":"Tensor.logsumexp \nlogsumexp \nApplies a logsumexp."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#lt","title":"Tensor.lt","content":"Tensor.lt \nlt \nComputes element-wise \\(a \u003C b\\), returning a boolean tensor containing a \u003Ccode\u003Etrue\u003C/code\u003E at each location where the comparison is 
true"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#matmul","title":"Tensor.matmul","content":"Tensor.matmul \nmatmul \nMatrix product of two tensors. \n\u003Cp class=\u0027fsdocs-para\u0027\u003E\n The behavior depends on the dimensionality of the tensors as follows:\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n If both tensors are 1-dimensional, the dot product (scalar) is returned.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n If both arguments are 2-dimensional, the matrix-matrix product is returned.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n If the first argument is 1-dimensional and the second argument is 2-dimensional, a 1 is prepended to its dimension for the purpose of the matrix multiply. After the matrix multiply, the prepended dimension is removed.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n If the first argument is 2-dimensional and the second argument is 1-dimensional, the matrix-vector product is returned.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N \u003E 2), then a \n batched matrix multiply is returned. If the first argument is 1-dimensional, a 1 is prepended to its dimension for the\n purpose of the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to\n its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (i.e. batch) dimensions\n are broadcasted (and thus must be broadcastable). 
For example, if input is a (j \\times 1 \\times n \\times m)(j\u00D71\u00D7n\u00D7m)\n tensor and other is a (k \\times m \\times p)(k\u00D7m\u00D7p) tensor, out will be an (j \\times k \\times n \\times p)(j\u00D7k\u00D7n\u00D7p)\n tensor.\n \u003C/p\u003E"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#max","title":"Tensor.max","content":"Tensor.max \nmax \n\n Returns the element-wise maximum of the elements in the two tensors.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#max","title":"Tensor.max","content":"Tensor.max \nmax \n\n Returns the maximum value of all elements in the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#max","title":"Tensor.max","content":"Tensor.max \nmax \n\n Returns the maximum value along the given dimension of all elements in the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxpool1d","title":"Tensor.maxpool1d","content":"Tensor.maxpool1d \nmaxpool1d \nApplies a 1D max pooling over an input signal composed of several input planes."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxpool1di","title":"Tensor.maxpool1di","content":"Tensor.maxpool1di \nmaxpool1di \nApplies a 1D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxpool2d","title":"Tensor.maxpool2d","content":"Tensor.maxpool2d \nmaxpool2d \nApplies a 2D max pooling over an input signal composed of several input planes."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxpool2di","title":"Tensor.maxpool2di","content":"Tensor.maxpool2di \nmaxpool2di \nApplies a 2D max pooling over an input signal composed of several input planes, returning the max indices along with the 
outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxpool3d","title":"Tensor.maxpool3d","content":"Tensor.maxpool3d \nmaxpool3d \nApplies a 3D max pooling over an input signal composed of several input planes."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxpool3di","title":"Tensor.maxpool3di","content":"Tensor.maxpool3di \nmaxpool3di \nApplies a 3D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxunpool1d","title":"Tensor.maxunpool1d","content":"Tensor.maxunpool1d \nmaxunpool1d \nComputes a partial inverse of maxpool1di"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxunpool2d","title":"Tensor.maxunpool2d","content":"Tensor.maxunpool2d \nmaxunpool2d \nComputes a partial inverse of maxpool2di"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#maxunpool3d","title":"Tensor.maxunpool3d","content":"Tensor.maxunpool3d \nmaxunpool3d \nComputes a partial inverse of maxpool3di"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#mean","title":"Tensor.mean","content":"Tensor.mean \nmean \nReturns the mean value of each row of the input tensor in the given dimension dim. \nIf keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. 
Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#mean","title":"Tensor.mean","content":"Tensor.mean \nmean \nReturns the mean value of all elements in the input tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#min","title":"Tensor.min","content":"Tensor.min \nmin \n\n Returns the element-wise minimum of the elements in the two tensors.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#min","title":"Tensor.min","content":"Tensor.min \nmin \n\n Returns the minimum value of all elements in the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#min","title":"Tensor.min","content":"Tensor.min \nmin \n\n Returns the minimum value along the given dimension of all elements in the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#move","title":"Tensor.move","content":"Tensor.move \nmove \n\n Returns a new tensor with the same contents moved to the given configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#move","title":"Tensor.move","content":"Tensor.move \nmove \n\n Returns a new tensor with the same contents moved to the given device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#move","title":"Tensor.move","content":"Tensor.move \nmove \n\n Returns a new tensor with the same contents moved to the given backend\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#mseLoss","title":"Tensor.mseLoss","content":"Tensor.mseLoss \nmseLoss \nCreates a criterion that measures the mean squared error (squared L2 norm) between each element in the input and the target."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#mul","title":"Tensor.mul","content":"Tensor.mul \nmul \nMultiplies each element of the 
object tensor by the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#mul","title":"Tensor.mul","content":"Tensor.mul \nmul \nMultiplies each element of the object tensor by the corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#multinomial","title":"Tensor.multinomial","content":"Tensor.multinomial \nmultinomial \nReturns a tensor where each row contains numSamples indices sampled from the multinomial probability distribution located in the corresponding row of tensor input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#ne","title":"Tensor.ne","content":"Tensor.ne \nne \nComputes element-wise \\(a \\neq b\\), returning a boolean tensor containing a \u003Ccode\u003Etrue\u003C/code\u003E at each location where the comparison is true"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#neg","title":"Tensor.neg","content":"Tensor.neg \nneg \nReturns a new tensor with the negative of the elements of the object tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#nllLoss","title":"Tensor.nllLoss","content":"Tensor.nllLoss \nnllLoss \nThe negative log likelihood loss."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#noDiff","title":"Tensor.noDiff","content":"Tensor.noDiff \nnoDiff \n\n Returns the input tensor but with any support for automatic differentiation removed.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#normalize","title":"Tensor.normalize","content":"Tensor.normalize \nnormalize \n\n Returns the tensor 
after min-max scaling\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#oneLike","title":"Tensor.oneLike","content":"Tensor.oneLike \noneLike \n\n Returns a scalar \u00271\u0027 tensor for the given element type and configuration, defaulting to\n the element type and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#onehotLike","title":"Tensor.onehotLike","content":"Tensor.onehotLike \nonehotLike \n\n Returns a tensor in the manner of \u003Csee cref=\u0022M:Furnace.FurnaceImage.onehot\u0022/\u003E for the given element type and configuration, defaulting to\n the element type and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#onesLike","title":"Tensor.onesLike","content":"Tensor.onesLike \nonesLike \n\n Returns a new tensor filled with \u00271\u0027 values for the given shape, element type and configuration, defaulting to the \n shape and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#pad","title":"Tensor.pad","content":"Tensor.pad \npad \nAdd zero padding to each side of a tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#permute","title":"Tensor.permute","content":"Tensor.permute \npermute \nReturns the original tensor with its dimensions permuted."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#pow","title":"Tensor.pow","content":"Tensor.pow \npow \nRaises each element of the self tensor to the power of the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. 
The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#pow","title":"Tensor.pow","content":"Tensor.pow \npow \nRaises each element of the self tensor to the power of each corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#randLike","title":"Tensor.randLike","content":"Tensor.randLike \nrandLike \n\n Returns a new tensor with random values drawn from the uniform distribution [0,1) for the\n given shape, element type and configuration, defaulting to the shape and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#randintLike","title":"Tensor.randintLike","content":"Tensor.randintLike \nrandintLike \n\n Returns a new tensor with random integer values drawn from the given range, for the\n given shape, element type and configuration, defaulting to the shape and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#randnLike","title":"Tensor.randnLike","content":"Tensor.randnLike \nrandnLike \n\n Returns a new tensor with random values drawn from the standard normal distribution, for the\n given shape, element type and configuration, defaulting to the shape and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#relu","title":"Tensor.relu","content":"Tensor.relu \nrelu \nApplies the rectified linear unit function element-wise."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#repeat","title":"Tensor.repeat","content":"Tensor.repeat \nrepeat \nRepeat elements of a 
tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#reverse","title":"Tensor.reverse","content":"Tensor.reverse \nreverse \nPropagate the reverse-mode derivative backwards in the computation graph, starting from this tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#reverseDiff","title":"Tensor.reverseDiff","content":"Tensor.reverseDiff \nreverseDiff \n\n Returns the input tensor with added support for reverse-mode automatic differentiation.\n \n\n Any tensors produced using this tensor will also support reverse-mode propagation. After the completion\n of the corresponding \u003Ccode\u003Ereverse\u003C/code\u003E operation on the overall result tensor, the computed derivative\n will be available. \n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#reversePush","title":"Tensor.reversePush","content":"Tensor.reversePush \nreversePush \nPush the given value as part of the reverse-mode computation at the given output tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#reverseReset","title":"Tensor.reverseReset","content":"Tensor.reverseReset \nreverseReset \nReset the reverse mode computation graph associated with the given output tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#round","title":"Tensor.round","content":"Tensor.round \nround \nReturns a new tensor with each of the elements of input rounded to the closest integer. \nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#safelog","title":"Tensor.safelog","content":"Tensor.safelog \nsafelog \nReturns the logarithm of the tensor after clamping the tensor so that all its elements are greater than epsilon. 
This is to avoid a -inf result for elements equal to zero."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#save","title":"Tensor.save","content":"Tensor.save \nsave \nSaves the tensor to the given file using a bespoke binary format. \n\n The binary format records the elements, backend, element type and shape. It does not record the device.\n The format used may change from version to version of Furnace.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#scalarLike","title":"Tensor.scalarLike","content":"Tensor.scalarLike \nscalarLike \n\n Returns a new scalar tensor for the given shape, element type and configuration, defaulting to the \n shape and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#scatter","title":"Tensor.scatter","content":"Tensor.scatter \nscatter \nScatter values along an axis specified by dim."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sigmoid","title":"Tensor.sigmoid","content":"Tensor.sigmoid \nsigmoid \nApplies the sigmoid element-wise function \n\\[\\text{sigmoid}(x) = \\frac{1}{1 \u002B \\exp(-x)}\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sign","title":"Tensor.sign","content":"Tensor.sign \nsign \nReturns a new tensor with the signs of the elements of input. 
\nThe tensor will have the same element type as the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sin","title":"Tensor.sin","content":"Tensor.sin \nsin \nReturns a new tensor with the sine of the elements of input"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sinh","title":"Tensor.sinh","content":"Tensor.sinh \nsinh \nReturns a new tensor with the hyperbolic sine of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#softmax","title":"Tensor.softmax","content":"Tensor.softmax \nsoftmax \nApplies a softmax function. \nSoftmax is defined as: \\text{softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#softplus","title":"Tensor.softplus","content":"Tensor.softplus \nsoftplus \nApplies the softplus function element-wise. \n\\[\\text{softplus}(x) = \\frac{1}{\\beta} * \\log(1 \u002B \\exp(\\beta * x))\\]"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#split","title":"Tensor.split","content":"Tensor.split \nsplit \nSplits the tensor into chunks. Each chunk is a view of the original tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sqrt","title":"Tensor.sqrt","content":"Tensor.sqrt \nsqrt \nReturns a new tensor with the square-root of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#squeeze","title":"Tensor.squeeze","content":"Tensor.squeeze \nsqueeze \nReturns a tensor with all the dimensions of input of size 1 removed. 
\nIf the tensor has a batch dimension of size 1, then squeeze(input) will also remove the batch dimension, which can lead to unexpected errors."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#standardize","title":"Tensor.standardize","content":"Tensor.standardize \nstandardize \n\n Returns the tensor after standardization (z-score normalization)\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#std","title":"Tensor.std","content":"Tensor.std \nstd \nReturns the standard deviation of all elements in the input tensor. \nIf unbiased is False, then the standard deviation will be calculated via the biased estimator. Otherwise, Bessel\u2019s correction will be used."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#std","title":"Tensor.std","content":"Tensor.std \nstd \nReturns the standard deviation of each row of the input tensor in the given dimension dim. \n\u003Cp class=\u0027fsdocs-para\u0027\u003EIf keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension(s).\u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003EIf unbiased is False, then the standard deviation will be calculated via the biased estimator. Otherwise, Bessel\u2019s correction will be used.\u003C/p\u003E"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sub","title":"Tensor.sub","content":"Tensor.sub \nsub \nSubtracts the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E from the corresponding element of the object tensor. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sub","title":"Tensor.sub","content":"Tensor.sub \nsub \nSubtracts each element of the object tensor from the corresponding element of the self tensor. The resulting tensor is returned. 
\nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sum","title":"Tensor.sum","content":"Tensor.sum \nsum \nReturns the sum of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. \nIf keepdim is \u003Ccode\u003Etrue\u003C/code\u003E, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sum","title":"Tensor.sum","content":"Tensor.sum \nsum \nReturns the sum of all elements in the input tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#sumToSize","title":"Tensor.sumToSize","content":"Tensor.sumToSize \nsumToSize \nSum this tensor to size \u003Cspan class=\u0022fsdocs-param-name\u0022\u003EnewShape\u003C/span\u003E, which must be broadcastable to this tensor size."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#summary","title":"Tensor.summary","content":"Tensor.summary \nsummary \n\n Returns a string summarising the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#tan","title":"Tensor.tan","content":"Tensor.tan \ntan \nReturns a new tensor with the tangent of the elements of input"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#tanh","title":"Tensor.tanh","content":"Tensor.tanh \ntanh \nReturns a new tensor with the hyperbolic tangent of the elements of input."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toArray","title":"Tensor.toArray","content":"Tensor.toArray \ntoArray \n\n Returns the value of a (non-scalar) tensor as an 
array\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toArray1D","title":"Tensor.toArray1D","content":"Tensor.toArray1D \ntoArray1D \n\n Returns the value of a 1D tensor as a 1D array\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toArray2D","title":"Tensor.toArray2D","content":"Tensor.toArray2D \ntoArray2D \n\n Returns the value of a 2D tensor as a 2D array\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toArray3D","title":"Tensor.toArray3D","content":"Tensor.toArray3D \ntoArray3D \n\n Returns the value of a 3D tensor as a 3D array\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toArray4D","title":"Tensor.toArray4D","content":"Tensor.toArray4D \ntoArray4D \n\n Returns the value of a 4D tensor as a 4D array\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toArray5D","title":"Tensor.toArray5D","content":"Tensor.toArray5D \ntoArray5D \n\n Returns the value of a 5D tensor as a 5D array\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toArray6D","title":"Tensor.toArray6D","content":"Tensor.toArray6D \ntoArray6D \n\n Returns the value of a 6D tensor as a 6D array\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toBool","title":"Tensor.toBool","content":"Tensor.toBool \ntoBool \n\n Convert a scalar tensor to a boolean value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toByte","title":"Tensor.toByte","content":"Tensor.toByte \ntoByte \n\n Convert a scalar tensor to a byte value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toDouble","title":"Tensor.toDouble","content":"Tensor.toDouble \ntoDouble \n\n Convert a scalar tensor to a float64 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toImage","title":"Tensor.toImage","content":"Tensor.toImage \ntoImage 
\nConvert tensor to an image tensor with shape Channels x Height x Width"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toImageString","title":"Tensor.toImageString","content":"Tensor.toImageString \ntoImageString \nConvert tensor to a grayscale image tensor and return a string representation approximating grayscale values"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toInt16","title":"Tensor.toInt16","content":"Tensor.toInt16 \ntoInt16 \n\n Convert a scalar tensor to an int16 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toInt32","title":"Tensor.toInt32","content":"Tensor.toInt32 \ntoInt32 \n\n Convert a scalar tensor to an int32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toInt64","title":"Tensor.toInt64","content":"Tensor.toInt64 \ntoInt64 \n\n Convert a scalar tensor to an int64 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toSByte","title":"Tensor.toSByte","content":"Tensor.toSByte \ntoSByte \n\n Convert a scalar tensor to a signed byte value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toScalar","title":"Tensor.toScalar","content":"Tensor.toScalar \ntoScalar \n\n Returns the value of a scalar tensor as an object\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#toSingle","title":"Tensor.toSingle","content":"Tensor.toSingle \ntoSingle \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#trace","title":"Tensor.trace","content":"Tensor.trace \ntrace \nReturns the sum of the elements of the diagonal of the input 2-D matrix."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#transpose","title":"Tensor.transpose","content":"Tensor.transpose \ntranspose \nReturns a tensor that is a transposed version of input with dimensions 0 
and 1 swapped."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#transpose","title":"Tensor.transpose","content":"Tensor.transpose \ntranspose \nReturns a tensor that is a transposed version of input. The given dimensions dim0 and dim1 are swapped."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#undilate","title":"Tensor.undilate","content":"Tensor.undilate \nundilate \nReverse the dilation of the tensor in using the given dilations in each corresponding dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#unflatten","title":"Tensor.unflatten","content":"Tensor.unflatten \nunflatten \nUnflattens a tensor dimension by expanding it to the given shape."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#unsqueeze","title":"Tensor.unsqueeze","content":"Tensor.unsqueeze \nunsqueeze \nReturns a new tensor with a dimension of size one inserted at the specified position"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#unsqueezeAs","title":"Tensor.unsqueezeAs","content":"Tensor.unsqueezeAs \nunsqueezeAs \nReturns a new tensor with dimensions of size one appended to the end until the number of dimensions is the same as the other tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#unstack","title":"Tensor.unstack","content":"Tensor.unstack \nunstack \nRemoves a tensor dimension."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#var","title":"Tensor.var","content":"Tensor.var \nvar \nReturns the variance of each row of the input tensor in the given dimension dim. \n\u003Cp class=\u0027fsdocs-para\u0027\u003EIf keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. 
Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension(s).\u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003EIf unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel\u2019s correction will be used.\u003C/p\u003E"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#var","title":"Tensor.var","content":"Tensor.var \nvar \nReturns the variance of all elements in the input tensor. \nIf unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel\u2019s correction will be used."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#view","title":"Tensor.view","content":"Tensor.view \nview \nReturns a new tensor with the same data as the object tensor but of a different shape. \n\n The returned tensor shares the same data and must have the same number of elements, but may have a different size. \n For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension,\n or only span across original dimensions \\(d, d\u002B1, \\dots, d\u002Bkd,d\u002B1,\u2026,d\u002Bk\\) that satisfy the following contiguity-like condition that\n \\(\\forall i = d, \\dots, d\u002Bk-1\u2200i=d,\u2026,d\u002Bk\u22121 ,\\) \\[\\text{stride}[i] = \\text{stride}[i\u002B1] \\times \\text{size}[i\u002B1]\\]\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#view","title":"Tensor.view","content":"Tensor.view \nview \nReturns a new tensor with the same data as the self tensor but of a different shape. \n\n The returned tensor shares the same data and must have the same number of elements, but may have a different size. 
\n For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension,\n or only span across original dimensions \\(d, d\u002B1, \\dots, d\u002Bkd,d\u002B1,\u2026,d\u002Bk\\) that satisfy the following contiguity-like condition that\n \\(\\forall i = d, \\dots, d\u002Bk-1\u2200i=d,\u2026,d\u002Bk\u22121 ,\\) \\[\\text{stride}[i] = \\text{stride}[i\u002B1] \\times \\text{size}[i\u002B1]\\]\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#viewAs","title":"Tensor.viewAs","content":"Tensor.viewAs \nviewAs \nView this tensor as the same size as other. \nThe returned tensor shares the same data and must have the same number of elements, but may have a different size. For a tensor to be viewed, the new view size must be compatible with its original size.\n The returned tensor shares the same data and must have the same number of elements, but may have a different size. 
\n For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension,\n or only span across original dimensions \\(d, d\u002B1, \\dots, d\u002Bkd,d\u002B1,\u2026,d\u002Bk\\) that satisfy the following contiguity-like condition that\n \\(\\forall i = d, \\dots, d\u002Bk-1\u2200i=d,\u2026,d\u002Bk\u22121 ,\\) \\[\\text{stride}[i] = \\text{stride}[i\u002B1] \\times \\text{size}[i\u002B1]\\]\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#zeroLike","title":"Tensor.zeroLike","content":"Tensor.zeroLike \nzeroLike \n\n Returns a scalar \u00270\u0027 tensor for the given element type and configuration, defaulting to\n the element type and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#zerosLike","title":"Tensor.zerosLike","content":"Tensor.zerosLike \nzerosLike \n\n Returns a new tensor filled with \u00270\u0027 values for the given shape, element type and configuration, defaulting to the \n shape and configuration of the input tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#backend","title":"Tensor.backend","content":"Tensor.backend \nbackend \n\n Gets the backend of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#primalDeep","title":"Tensor.primalDeep","content":"Tensor.primalDeep \nprimalDeep \n\n Gets the value of the tensor ignoring all its derivatives\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#elementSize","title":"Tensor.elementSize","content":"Tensor.elementSize \nelementSize \n\n Returns the size in bytes of an individual element in this tensor. 
Depending on dtype, backend configuration, this is not guaranteed to be correct and can behave differently in different runtime environments.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#isNoDiff","title":"Tensor.isNoDiff","content":"Tensor.isNoDiff \nisNoDiff \n\n Indicates if a tensor is a constant, meaning that it is not taking part in forward or reverse-mode differentiation\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#nestingTag","title":"Tensor.nestingTag","content":"Tensor.nestingTag \nnestingTag \n\n Gets the differentiation nesting tag of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Item","title":"Tensor.Item","content":"Tensor.Item \nItem \nGet the item at the given index as a scalar tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#deviceType","title":"Tensor.deviceType","content":"Tensor.deviceType \ndeviceType \n\n Gets the device type of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#isForwardDiff","title":"Tensor.isForwardDiff","content":"Tensor.isForwardDiff \nisForwardDiff \n\n Indicates if a tensor is taking part in forward-mode differentiation\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#depth","title":"Tensor.depth","content":"Tensor.depth \ndepth \n\n Gets the differentiation depth of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#fanout","title":"Tensor.fanout","content":"Tensor.fanout \nfanout \n\n Gets the fanout of a tensor used in reverse-mode differentiation\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#device","title":"Tensor.device","content":"Tensor.device \ndevice \n\n Gets the device of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#nelement","title":"Tensor.nelement","content":"Tensor.nelement 
\nnelement \n\n Gets the number of elements in the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#memorySize","title":"Tensor.memorySize","content":"Tensor.memorySize \nmemorySize \n\n Returns the size in bytes of the total memory used by this tensor. Depending on dtype, backend configuration, this is not guaranteed to be correct and can behave differently in different runtime environments.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#primal","title":"Tensor.primal","content":"Tensor.primal \nprimal \n\n Gets the value of the tensor ignoring its first derivative\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#derivative","title":"Tensor.derivative","content":"Tensor.derivative \nderivative \n\n Gets or sets the derivative of a tensor used in differentiation\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#isReverseDiff","title":"Tensor.isReverseDiff","content":"Tensor.isReverseDiff \nisReverseDiff \n\n Indicates if a tensor is taking part in reverse-mode differentiation\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#shape","title":"Tensor.shape","content":"Tensor.shape \nshape \n\n Gets the shape of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#dim","title":"Tensor.dim","content":"Tensor.dim \ndim \n\n Gets the number of dimensions of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#parentOp","title":"Tensor.parentOp","content":"Tensor.parentOp \nparentOp \n\n Gets the parent operation of a tensor used in reverse-mode differentiation\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#dtype","title":"Tensor.dtype","content":"Tensor.dtype \ndtype \n\n Gets the element type of the 
tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#derivativeDeep","title":"Tensor.derivativeDeep","content":"Tensor.derivativeDeep \nderivativeDeep \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#primalRaw","title":"Tensor.primalRaw","content":"Tensor.primalRaw \nprimalRaw \n\n Gets the raw value of the tensor ignoring all its derivatives\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Abs","title":"Tensor.Abs","content":"Tensor.Abs \nAbs \nA method to enable the use of the F# function \u003Ccode\u003Eabs\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Acos","title":"Tensor.Acos","content":"Tensor.Acos \nAcos \nA method to enable the use of the F# function \u003Ccode\u003Eacos\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Asin","title":"Tensor.Asin","content":"Tensor.Asin \nAsin \nA method to enable the use of the F# function \u003Ccode\u003Easin\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Atan","title":"Tensor.Atan","content":"Tensor.Atan \nAtan \nA method to enable the use of the F# function \u003Ccode\u003Eatan\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Ceiling","title":"Tensor.Ceiling","content":"Tensor.Ceiling \nCeiling \nA method to enable the use of the F# function \u003Ccode\u003Eceil\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Cos","title":"Tensor.Cos","content":"Tensor.Cos \nCos \nA method to enable the use of the F# function \u003Ccode\u003Ecos\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Cosh","title":"Tensor.Cosh","content":"Tensor.Cosh \nCosh \nA method to enable the use of the F# function 
\u003Ccode\u003Ecosh\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Exp","title":"Tensor.Exp","content":"Tensor.Exp \nExp \nA method to enable the use of the F# function \u003Ccode\u003Eexp\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Floor","title":"Tensor.Floor","content":"Tensor.Floor \nFloor \nA method to enable the use of the F# function \u003Ccode\u003Efloor\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Log","title":"Tensor.Log","content":"Tensor.Log \nLog \nA method to enable the use of the F# function \u003Ccode\u003Elog\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Log10","title":"Tensor.Log10","content":"Tensor.Log10 \nLog10 \nA method to enable the use of the F# function \u003Ccode\u003Elog10\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Op","title":"Tensor.Op","content":"Tensor.Op \nOp \nAllows the definition of a new binary tensor op."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Op","title":"Tensor.Op","content":"Tensor.Op \nOp \nAllows the definition of a new unary tensor op."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Pow","title":"Tensor.Pow","content":"Tensor.Pow \nPow \nRaises the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E to the power of each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Pow","title":"Tensor.Pow","content":"Tensor.Pow \nPow \nRaises the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E to the power of each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. 
The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Pow","title":"Tensor.Pow","content":"Tensor.Pow \nPow \nRaises the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E to the power of each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Pow","title":"Tensor.Pow","content":"Tensor.Pow \nPow \nRaises each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E to the power of the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Pow","title":"Tensor.Pow","content":"Tensor.Pow \nPow \nRaises each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E to the power of the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Pow","title":"Tensor.Pow","content":"Tensor.Pow \nPow \nRaises each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E to the power of the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Pow","title":"Tensor.Pow","content":"Tensor.Pow \nPow \nRaises each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E to the power of the corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. 
\nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Round","title":"Tensor.Round","content":"Tensor.Round \nRound \nA method to enable the use of the F# function \u003Ccode\u003Eround\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Sin","title":"Tensor.Sin","content":"Tensor.Sin \nSin \nA method to enable the use of the F# function \u003Ccode\u003Esin\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Sinh","title":"Tensor.Sinh","content":"Tensor.Sinh \nSinh \nA method to enable the use of the F# function \u003Ccode\u003Esinh\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Sqrt","title":"Tensor.Sqrt","content":"Tensor.Sqrt \nSqrt \nA method to enable the use of the F# function \u003Ccode\u003Esqrt\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Tan","title":"Tensor.Tan","content":"Tensor.Tan \nTan \nA method to enable the use of the F# function \u003Ccode\u003Etan\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Tanh","title":"Tensor.Tanh","content":"Tensor.Tanh \nTanh \nA method to enable the use of the F# function \u003Ccode\u003Etanh\u003C/code\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#cat","title":"Tensor.cat","content":"Tensor.cat \ncat \nConcatenates the given sequence of seq tensors in the given dimension. 
\nAll tensors must either have the same shape (except in the concatenating dimension) or be empty."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#create","title":"Tensor.create","content":"Tensor.create \ncreate \n\n Creates a new tensor from the given data, using the given element type and configuration.\n \nThe fastest creation technique is a one dimensional array matching the desired dtype. Then use \u0027view\u0027 to reshape."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#eye","title":"Tensor.eye","content":"Tensor.eye \neye \nReturns a 2-D tensor with ones on the diagonal and zeros elsewhere."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#load","title":"Tensor.load","content":"Tensor.load \nload \nLoads the tensor from the given file using the given element type and configuration. \n\n The backend at the time of saving the tensor must be available when the tensor is reloaded.\n The tensor is first loaded into that backend and then moved. As a result, intermediate tensors may be created\n in the process of reloading.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#ofRawTensor","title":"Tensor.ofRawTensor","content":"Tensor.ofRawTensor \nofRawTensor \n\n Creates a new tensor from the raw tensor.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(\u002B)","title":"Tensor.(\u002B)","content":"Tensor.(\u002B) \n(\u002B) \nThe scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E is added to each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. 
The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(\u002B)","title":"Tensor.(\u002B)","content":"Tensor.(\u002B) \n(\u002B) \nEach element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E is added to the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(\u002B)","title":"Tensor.(\u002B)","content":"Tensor.(\u002B) \n(\u002B) \nEach element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E is added to each corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(/)","title":"Tensor.(/)","content":"Tensor.(/) \n(/) \nDivides the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E by each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(/)","title":"Tensor.(/)","content":"Tensor.(/) \n(/) \nDivides each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E by the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(/)","title":"Tensor.(/)","content":"Tensor.(/) \n(/) \nDivides each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E by the corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. 
\nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#op_Explicit","title":"Tensor.op_Explicit","content":"Tensor.op_Explicit \nop_Explicit \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#op_Explicit","title":"Tensor.op_Explicit","content":"Tensor.op_Explicit \nop_Explicit \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#op_Explicit","title":"Tensor.op_Explicit","content":"Tensor.op_Explicit \nop_Explicit \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#op_Explicit","title":"Tensor.op_Explicit","content":"Tensor.op_Explicit \nop_Explicit \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#op_Explicit","title":"Tensor.op_Explicit","content":"Tensor.op_Explicit \nop_Explicit \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#op_Explicit","title":"Tensor.op_Explicit","content":"Tensor.op_Explicit \nop_Explicit \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#op_Explicit","title":"Tensor.op_Explicit","content":"Tensor.op_Explicit \nop_Explicit \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#op_Explicit","title":"Tensor.op_Explicit","content":"Tensor.op_Explicit \nop_Explicit \n\n Convert a scalar tensor to a float32 value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(--\u003E)","title":"Tensor.(--\u003E)","content":"Tensor.(--\u003E) \n(--\u003E) \nPipeline the tensor into a 
function."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(*)","title":"Tensor.(*)","content":"Tensor.(*) \n(*) \nMultiplies the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E by each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(*)","title":"Tensor.(*)","content":"Tensor.(*) \n(*) \nMultiplies each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E by the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(*)","title":"Tensor.(*)","content":"Tensor.(*) \n(*) \nMultiplies each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E by the corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(-)","title":"Tensor.(-)","content":"Tensor.(-) \n(-) \nSubtracts each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E from the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E. The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(-)","title":"Tensor.(-)","content":"Tensor.(-) \n(-) \nSubtracts the scalar \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E from the corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E. 
The resulting tensor is returned."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(-)","title":"Tensor.(-)","content":"Tensor.(-) \n(-) \nSubtracts each element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Eb\u003C/span\u003E from the corresponding element of the tensor \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E. The resulting tensor is returned. \nThe shapes of the two tensors must be broadcastable."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#(~-)","title":"Tensor.(~-)","content":"Tensor.(~-) \n(~-) \nReturns a new tensor with the negative of the elements of \u003Cspan class=\u0022fsdocs-param-name\u0022\u003Ea\u003C/span\u003E."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#stack","title":"Tensor.stack","content":"Tensor.stack \nstack \nConcatenates sequence of tensors along a new dimension. \nAll tensors need to be of the same shape."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Zero","title":"Tensor.Zero","content":"Tensor.Zero \nZero \n\n Get the scalar zero tensor for the current configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#One","title":"Tensor.One","content":"Tensor.One \nOne \n\n Get the scalar one tensor for the current configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html","title":"TensorOp","content":"TensorOp \n \nTensorOp.AddTT \nAddTT \nTensorOp.AddTTConst \nAddTTConst \nTensorOp.AddTT0 \nAddTT0 \nTensorOp.AddTT0Const \nAddTT0Const \nTensorOp.SubTT \nSubTT \nTensorOp.SubTTConst \nSubTTConst \nTensorOp.SubTConstT \nSubTConstT \nTensorOp.SubTT0 \nSubTT0 \nTensorOp.SubTT0Const \nSubTT0Const \nTensorOp.SubT0ConstT \nSubT0ConstT \nTensorOp.MulTT \nMulTT \nTensorOp.MulTTConst \nMulTTConst \nTensorOp.MulTT0 \nMulTT0 \nTensorOp.MulTT0Const \nMulTT0Const \nTensorOp.MulTConstT0 \nMulTConstT0 
\nTensorOp.DivTT \nDivTT \nTensorOp.DivTTConst \nDivTTConst \nTensorOp.DivTConstT \nDivTConstT \nTensorOp.DivT0T \nDivT0T \nTensorOp.DivT0ConstT \nDivT0ConstT \nTensorOp.DivTT0 \nDivTT0 \nTensorOp.DivTT0Const \nDivTT0Const \nTensorOp.PowTT \nPowTT \nTensorOp.PowTTConst \nPowTTConst \nTensorOp.PowTConstT \nPowTConstT \nTensorOp.PowT0ConstT \nPowT0ConstT \nTensorOp.PowTT0Const \nPowTT0Const \nTensorOp.MatMulTT \nMatMulTT \nTensorOp.MatMulTTConst \nMatMulTTConst \nTensorOp.MatMulTConstT \nMatMulTConstT \nTensorOp.MaxPool1DT \nMaxPool1DT \nTensorOp.MaxUnpool1DT \nMaxUnpool1DT \nTensorOp.MaxPool2DT \nMaxPool2DT \nTensorOp.MaxUnpool2DT \nMaxUnpool2DT \nTensorOp.MaxPool3DT \nMaxPool3DT \nTensorOp.MaxUnpool3DT \nMaxUnpool3DT \nTensorOp.Conv1DTT \nConv1DTT \nTensorOp.Conv1DTTConst \nConv1DTTConst \nTensorOp.Conv1DTConstT \nConv1DTConstT \nTensorOp.Conv2DTT \nConv2DTT \nTensorOp.Conv2DTTConst \nConv2DTTConst \nTensorOp.Conv2DTConstT \nConv2DTConstT \nTensorOp.Conv3DTT \nConv3DTT \nTensorOp.Conv3DTTConst \nConv3DTTConst \nTensorOp.Conv3DTConstT \nConv3DTConstT \nTensorOp.AddTTSlice \nAddTTSlice \nTensorOp.AddTTConstSlice \nAddTTConstSlice \nTensorOp.AddTConstTSlice \nAddTConstTSlice \nTensorOp.NegT \nNegT \nTensorOp.SumT \nSumT \nTensorOp.SumTDim \nSumTDim \nTensorOp.ExpandT \nExpandT \nTensorOp.StackTs \nStackTs \nTensorOp.UnstackT \nUnstackT \nTensorOp.CatTs \nCatTs \nTensorOp.SplitT \nSplitT \nTensorOp.SliceT \nSliceT \nTensorOp.GatherT \nGatherT \nTensorOp.ScatterT \nScatterT \nTensorOp.PermuteT \nPermuteT \nTensorOp.TransposeT \nTransposeT \nTensorOp.TransposeT2 \nTransposeT2 \nTensorOp.SqueezeT \nSqueezeT \nTensorOp.UnsqueezeT \nUnsqueezeT \nTensorOp.FlipT \nFlipT \nTensorOp.DilateT \nDilateT \nTensorOp.UndilateT \nUndilateT \nTensorOp.ViewT \nViewT \nTensorOp.ClampT \nClampT \nTensorOp.SignT \nSignT \nTensorOp.FloorT \nFloorT \nTensorOp.CeilT \nCeilT \nTensorOp.RoundT \nRoundT \nTensorOp.AbsT \nAbsT \nTensorOp.ReluT \nReluT \nTensorOp.SoftplusT \nSoftplusT 
\nTensorOp.SigmoidT \nSigmoidT \nTensorOp.ExpT \nExpT \nTensorOp.LogT \nLogT \nTensorOp.Log10T \nLog10T \nTensorOp.SqrtT \nSqrtT \nTensorOp.SinT \nSinT \nTensorOp.CosT \nCosT \nTensorOp.TanT \nTanT \nTensorOp.SinhT \nSinhT \nTensorOp.CoshT \nCoshT \nTensorOp.TanhT \nTanhT \nTensorOp.AsinT \nAsinT \nTensorOp.AcosT \nAcosT \nTensorOp.AtanT \nAtanT \nTensorOp.NewT \nNewT \nTensorOp.OpUnaryT \nOpUnaryT \nTensorOp.OpBinaryTT \nOpBinaryTT \nTensorOp.OpBinaryTC \nOpBinaryTC \nTensorOp.OpBinaryCT \nOpBinaryCT"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AddTT","title":"TensorOp.AddTT","content":"TensorOp.AddTT \nAddTT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AddTTConst","title":"TensorOp.AddTTConst","content":"TensorOp.AddTTConst \nAddTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AddTT0","title":"TensorOp.AddTT0","content":"TensorOp.AddTT0 \nAddTT0 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AddTT0Const","title":"TensorOp.AddTT0Const","content":"TensorOp.AddTT0Const \nAddTT0Const \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SubTT","title":"TensorOp.SubTT","content":"TensorOp.SubTT \nSubTT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SubTTConst","title":"TensorOp.SubTTConst","content":"TensorOp.SubTTConst \nSubTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SubTConstT","title":"TensorOp.SubTConstT","content":"TensorOp.SubTConstT \nSubTConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SubTT0","title":"TensorOp.SubTT0","content":"TensorOp.SubTT0 \nSubTT0 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SubTT0Const","title":"TensorOp.SubTT0Const","content":"TensorOp.SubTT0Const \nSubTT0Const 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SubT0ConstT","title":"TensorOp.SubT0ConstT","content":"TensorOp.SubT0ConstT \nSubT0ConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MulTT","title":"TensorOp.MulTT","content":"TensorOp.MulTT \nMulTT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MulTTConst","title":"TensorOp.MulTTConst","content":"TensorOp.MulTTConst \nMulTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MulTT0","title":"TensorOp.MulTT0","content":"TensorOp.MulTT0 \nMulTT0 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MulTT0Const","title":"TensorOp.MulTT0Const","content":"TensorOp.MulTT0Const \nMulTT0Const \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MulTConstT0","title":"TensorOp.MulTConstT0","content":"TensorOp.MulTConstT0 \nMulTConstT0 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#DivTT","title":"TensorOp.DivTT","content":"TensorOp.DivTT \nDivTT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#DivTTConst","title":"TensorOp.DivTTConst","content":"TensorOp.DivTTConst \nDivTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#DivTConstT","title":"TensorOp.DivTConstT","content":"TensorOp.DivTConstT \nDivTConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#DivT0T","title":"TensorOp.DivT0T","content":"TensorOp.DivT0T \nDivT0T \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#DivT0ConstT","title":"TensorOp.DivT0ConstT","content":"TensorOp.DivT0ConstT \nDivT0ConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#DivTT0","title":"TensorOp.DivTT0","content":"TensorOp.DivTT0 \nDivTT0 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#DivTT0Const","title":"TensorOp.DivTT0Const","content":"TensorOp.DivTT0Const \nDivTT0Const \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#PowTT","title":"TensorOp.PowTT","content":"TensorOp.PowTT \nPowTT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#PowTTConst","title":"TensorOp.PowTTConst","content":"TensorOp.PowTTConst \nPowTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#PowTConstT","title":"TensorOp.PowTConstT","content":"TensorOp.PowTConstT \nPowTConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#PowT0ConstT","title":"TensorOp.PowT0ConstT","content":"TensorOp.PowT0ConstT \nPowT0ConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#PowTT0Const","title":"TensorOp.PowTT0Const","content":"TensorOp.PowTT0Const \nPowTT0Const \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MatMulTT","title":"TensorOp.MatMulTT","content":"TensorOp.MatMulTT \nMatMulTT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MatMulTTConst","title":"TensorOp.MatMulTTConst","content":"TensorOp.MatMulTTConst \nMatMulTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MatMulTConstT","title":"TensorOp.MatMulTConstT","content":"TensorOp.MatMulTConstT \nMatMulTConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MaxPool1DT","title":"TensorOp.MaxPool1DT","content":"TensorOp.MaxPool1DT \nMaxPool1DT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MaxUnpool1DT","title":"TensorOp.MaxUnpool1DT","content":"TensorOp.MaxUnpool1DT \nMaxUnpool1DT 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MaxPool2DT","title":"TensorOp.MaxPool2DT","content":"TensorOp.MaxPool2DT \nMaxPool2DT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MaxUnpool2DT","title":"TensorOp.MaxUnpool2DT","content":"TensorOp.MaxUnpool2DT \nMaxUnpool2DT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MaxPool3DT","title":"TensorOp.MaxPool3DT","content":"TensorOp.MaxPool3DT \nMaxPool3DT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#MaxUnpool3DT","title":"TensorOp.MaxUnpool3DT","content":"TensorOp.MaxUnpool3DT \nMaxUnpool3DT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv1DTT","title":"TensorOp.Conv1DTT","content":"TensorOp.Conv1DTT \nConv1DTT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv1DTTConst","title":"TensorOp.Conv1DTTConst","content":"TensorOp.Conv1DTTConst \nConv1DTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv1DTConstT","title":"TensorOp.Conv1DTConstT","content":"TensorOp.Conv1DTConstT \nConv1DTConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv2DTT","title":"TensorOp.Conv2DTT","content":"TensorOp.Conv2DTT \nConv2DTT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv2DTTConst","title":"TensorOp.Conv2DTTConst","content":"TensorOp.Conv2DTTConst \nConv2DTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv2DTConstT","title":"TensorOp.Conv2DTConstT","content":"TensorOp.Conv2DTConstT \nConv2DTConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv3DTT","title":"TensorOp.Conv3DTT","content":"TensorOp.Conv3DTT \nConv3DTT 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv3DTTConst","title":"TensorOp.Conv3DTTConst","content":"TensorOp.Conv3DTTConst \nConv3DTTConst \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Conv3DTConstT","title":"TensorOp.Conv3DTConstT","content":"TensorOp.Conv3DTConstT \nConv3DTConstT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AddTTSlice","title":"TensorOp.AddTTSlice","content":"TensorOp.AddTTSlice \nAddTTSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AddTTConstSlice","title":"TensorOp.AddTTConstSlice","content":"TensorOp.AddTTConstSlice \nAddTTConstSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AddTConstTSlice","title":"TensorOp.AddTConstTSlice","content":"TensorOp.AddTConstTSlice \nAddTConstTSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#NegT","title":"TensorOp.NegT","content":"TensorOp.NegT \nNegT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SumT","title":"TensorOp.SumT","content":"TensorOp.SumT \nSumT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SumTDim","title":"TensorOp.SumTDim","content":"TensorOp.SumTDim \nSumTDim \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#ExpandT","title":"TensorOp.ExpandT","content":"TensorOp.ExpandT \nExpandT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#StackTs","title":"TensorOp.StackTs","content":"TensorOp.StackTs \nStackTs \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#UnstackT","title":"TensorOp.UnstackT","content":"TensorOp.UnstackT \nUnstackT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#CatTs","title":"TensorOp.CatTs","content":"TensorOp.CatTs \nCatTs 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SplitT","title":"TensorOp.SplitT","content":"TensorOp.SplitT \nSplitT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SliceT","title":"TensorOp.SliceT","content":"TensorOp.SliceT \nSliceT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#GatherT","title":"TensorOp.GatherT","content":"TensorOp.GatherT \nGatherT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#ScatterT","title":"TensorOp.ScatterT","content":"TensorOp.ScatterT \nScatterT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#PermuteT","title":"TensorOp.PermuteT","content":"TensorOp.PermuteT \nPermuteT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#TransposeT","title":"TensorOp.TransposeT","content":"TensorOp.TransposeT \nTransposeT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#TransposeT2","title":"TensorOp.TransposeT2","content":"TensorOp.TransposeT2 \nTransposeT2 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SqueezeT","title":"TensorOp.SqueezeT","content":"TensorOp.SqueezeT \nSqueezeT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#UnsqueezeT","title":"TensorOp.UnsqueezeT","content":"TensorOp.UnsqueezeT \nUnsqueezeT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#FlipT","title":"TensorOp.FlipT","content":"TensorOp.FlipT \nFlipT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#DilateT","title":"TensorOp.DilateT","content":"TensorOp.DilateT \nDilateT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#UndilateT","title":"TensorOp.UndilateT","content":"TensorOp.UndilateT \nUndilateT 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#ViewT","title":"TensorOp.ViewT","content":"TensorOp.ViewT \nViewT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#ClampT","title":"TensorOp.ClampT","content":"TensorOp.ClampT \nClampT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SignT","title":"TensorOp.SignT","content":"TensorOp.SignT \nSignT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#FloorT","title":"TensorOp.FloorT","content":"TensorOp.FloorT \nFloorT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#CeilT","title":"TensorOp.CeilT","content":"TensorOp.CeilT \nCeilT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#RoundT","title":"TensorOp.RoundT","content":"TensorOp.RoundT \nRoundT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AbsT","title":"TensorOp.AbsT","content":"TensorOp.AbsT \nAbsT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#ReluT","title":"TensorOp.ReluT","content":"TensorOp.ReluT \nReluT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SoftplusT","title":"TensorOp.SoftplusT","content":"TensorOp.SoftplusT \nSoftplusT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SigmoidT","title":"TensorOp.SigmoidT","content":"TensorOp.SigmoidT \nSigmoidT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#ExpT","title":"TensorOp.ExpT","content":"TensorOp.ExpT \nExpT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#LogT","title":"TensorOp.LogT","content":"TensorOp.LogT \nLogT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#Log10T","title":"TensorOp.Log10T","content":"TensorOp.Log10T \nLog10T 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SqrtT","title":"TensorOp.SqrtT","content":"TensorOp.SqrtT \nSqrtT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SinT","title":"TensorOp.SinT","content":"TensorOp.SinT \nSinT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#CosT","title":"TensorOp.CosT","content":"TensorOp.CosT \nCosT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#TanT","title":"TensorOp.TanT","content":"TensorOp.TanT \nTanT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#SinhT","title":"TensorOp.SinhT","content":"TensorOp.SinhT \nSinhT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#CoshT","title":"TensorOp.CoshT","content":"TensorOp.CoshT \nCoshT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#TanhT","title":"TensorOp.TanhT","content":"TensorOp.TanhT \nTanhT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AsinT","title":"TensorOp.AsinT","content":"TensorOp.AsinT \nAsinT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AcosT","title":"TensorOp.AcosT","content":"TensorOp.AcosT \nAcosT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#AtanT","title":"TensorOp.AtanT","content":"TensorOp.AtanT \nAtanT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#NewT","title":"TensorOp.NewT","content":"TensorOp.NewT \nNewT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#OpUnaryT","title":"TensorOp.OpUnaryT","content":"TensorOp.OpUnaryT \nOpUnaryT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#OpBinaryTT","title":"TensorOp.OpBinaryTT","content":"TensorOp.OpBinaryTT \nOpBinaryTT 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#OpBinaryTC","title":"TensorOp.OpBinaryTC","content":"TensorOp.OpBinaryTC \nOpBinaryTC \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-tensorop.html#OpBinaryCT","title":"TensorOp.OpBinaryCT","content":"TensorOp.OpBinaryCT \nOpBinaryCT \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html","title":"UnaryOp","content":"UnaryOp \nDefines a new op implementing a unary function and its derivatives. Instances of this class are used with the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Op\u0022\u003ETensor.Op\u003C/a\u003E method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. \n\u003Cp class=\u0027fsdocs-para\u0027\u003EThis type represents the most generic definition of a new op representing a unary function, allowing the specification of: (1) the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html\u0022\u003ERawTensor\u003C/a\u003E operation, (2) the derivative propagation rule for the forward differentiation mode and (3) the derivative propagation rule for the reverse differentiation mode.\u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003EIn general, if you are implementing a simple elementwise op, you should prefer using the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-unaryopelementwise.html\u0022\u003EUnaryOpElementwise\u003C/a\u003E type, which is much simpler to use.\u003C/p\u003E \nUnaryOp.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nUnaryOp.ad_dfda \nad_dfda \nUnaryOp.fRaw \nfRaw \nUnaryOp.fd_dfda \nfd_dfda \nUnaryOp.name \nname"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html#\u0060\u0060.ctor\u0060\u0060","title":"UnaryOp.\u0060\u0060.ctor\u0060\u0060","content":"UnaryOp.\u0060\u0060.ctor\u0060\u0060 
\n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html#ad_dfda","title":"UnaryOp.ad_dfda","content":"UnaryOp.ad_dfda \nad_dfda \nDerivative propagation rule for forward differentiation mode. This represents the derivative of \\( f(a) \\) with respect a value \\( x \\) earlier in the computation graph than the function\u0027s argument \\( a \\). In other words, it computes \\( \\frac{\\partial f(a)}{\\partial x} = \\frac{\\partial a}{\\partial x} \\frac{\\partial f(a)}{\\partial a} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html#fRaw","title":"UnaryOp.fRaw","content":"UnaryOp.fRaw \nfRaw \nRawTensor operation \\( f(a) \\) performing the op."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html#fd_dfda","title":"UnaryOp.fd_dfda","content":"UnaryOp.fd_dfda \nfd_dfda \nDerivative propagation rule for reverse differentiation mode. This represents the derivative of a value \\( y \\), which comes later in the computation graph than the function\u0027s value \\( f(a) \\), with respect to the function\u0027s argument \\( a \\). In other words, it computes \\( \\frac{\\partial y}{\\partial a} = \\frac{\\partial y}{\\partial f(a)} \\frac{\\partial f(a)}{\\partial a} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html#name","title":"UnaryOp.name","content":"UnaryOp.name \nname \n\n Name of the op.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryopelementwise.html","title":"UnaryOpElementwise","content":"UnaryOpElementwise \nDefines a new op implementing an elementwise unary function and its derivatives. Instances of this class are used with the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#Op\u0022\u003ETensor.Op\u003C/a\u003E method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. 
\n\u003Cp class=\u0027fsdocs-para\u0027\u003EThis type is specialized to elementwise ops. It requires the user to specify only (1) the \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html\u0022\u003ERawTensor\u003C/a\u003E operation and (2) the derivative of the function with respect to its argument. The corresponding derivative propagation rules for the forward and reverse differentiation modes are automatically generated.\u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003EIf you are implementing a complex op that is not elementwise, you can use the generic type \u003Ca href=\u0022https://fsprojects.github.io/Furnace/reference/furnace-unaryop.html\u0022\u003EUnaryOp\u003C/a\u003E, which allows you to define the full derivative propagation rules.\u003C/p\u003E \nUnaryOpElementwise.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nUnaryOpElementwise.dfda \ndfda"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryopelementwise.html#\u0060\u0060.ctor\u0060\u0060","title":"UnaryOpElementwise.\u0060\u0060.ctor\u0060\u0060","content":"UnaryOpElementwise.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-unaryopelementwise.html#dfda","title":"UnaryOpElementwise.dfda","content":"UnaryOpElementwise.dfda \ndfda \nDerivative of the function with respect to its argument, \\( \\frac{\\partial f(a)}{\\partial a} \\)."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-scalar.html","title":"scalar","content":"scalar \n\n Represents a scalar on the Furnace programming model\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-imageextensions.html","title":"ImageExtensions","content":"ImageExtensions \n \nImageExtensions.saveImage \nsaveImage \nImageExtensions.loadImage \nloadImage \nImageExtensions.loadImage \nloadImage \nImageExtensions.saveImage 
\nsaveImage"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-imageextensions.html#saveImage","title":"ImageExtensions.saveImage","content":"ImageExtensions.saveImage \nsaveImage \nSave tensor to an image file using png or jpg format"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-imageextensions.html#loadImage","title":"ImageExtensions.loadImage","content":"ImageExtensions.loadImage \nloadImage \nLoad an image file and return it as a tensor"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-imageextensions.html#loadImage","title":"ImageExtensions.loadImage","content":"ImageExtensions.loadImage \nloadImage \nLoad an image file as a tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-imageextensions.html#saveImage","title":"ImageExtensions.saveImage","content":"ImageExtensions.saveImage \nsaveImage \nSave a given Tensor into an image file. \nIf the input tensor has 4 dimensions, then make a single image grid."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-imageutil.html","title":"ImageUtil","content":"ImageUtil \n \nImageUtil.saveImage \nsaveImage \nImageUtil.loadImage \nloadImage"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-imageutil.html#saveImage","title":"ImageUtil.saveImage","content":"ImageUtil.saveImage \nsaveImage \n\n Saves the given pixel array to a file and optionally resizes it in the process. 
Supports .png format.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-imageutil.html#loadImage","title":"ImageUtil.loadImage","content":"ImageUtil.loadImage \nloadImage \n\n Loads a pixel array from a file and optionally resizes it in the process.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html","title":"BackendTensorStatics","content":"BackendTensorStatics \n\n Represents the static functionality for tensors implemented by a Furnace backend.\n \nBackendTensorStatics.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nBackendTensorStatics.CreateFromFlatArray \nCreateFromFlatArray \nBackendTensorStatics.Empty \nEmpty \nBackendTensorStatics.Full \nFull \nBackendTensorStatics.GetDevices \nGetDevices \nBackendTensorStatics.IsDeviceTypeAvailable \nIsDeviceTypeAvailable \nBackendTensorStatics.One \nOne \nBackendTensorStatics.Ones \nOnes \nBackendTensorStatics.Random \nRandom \nBackendTensorStatics.RandomInt \nRandomInt \nBackendTensorStatics.RandomNormal \nRandomNormal \nBackendTensorStatics.Seed \nSeed \nBackendTensorStatics.Zero \nZero \nBackendTensorStatics.Zeros \nZeros \nBackendTensorStatics.Get \nGet \nBackendTensorStatics.Seed \nSeed"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#\u0060\u0060.ctor\u0060\u0060","title":"BackendTensorStatics.\u0060\u0060.ctor\u0060\u0060","content":"BackendTensorStatics.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#CreateFromFlatArray","title":"BackendTensorStatics.CreateFromFlatArray","content":"BackendTensorStatics.CreateFromFlatArray \nCreateFromFlatArray \n\n Create a tensor of appropriate dtype from a scalar or array of appropriate values.\n A backend type is delivered consistent with in-memory data - a type for dtype Int32 gets int32 data 
etc.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Empty","title":"BackendTensorStatics.Empty","content":"BackendTensorStatics.Empty \nEmpty \n\n Gets a tensor filled with arbitrary values for the given shape and device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Full","title":"BackendTensorStatics.Full","content":"BackendTensorStatics.Full \nFull \n\n Gets a tensor filled with the given value for the given shape and device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#GetDevices","title":"BackendTensorStatics.GetDevices","content":"BackendTensorStatics.GetDevices \nGetDevices \n\n Gets the devices supported by this backend\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#IsDeviceTypeAvailable","title":"BackendTensorStatics.IsDeviceTypeAvailable","content":"BackendTensorStatics.IsDeviceTypeAvailable \nIsDeviceTypeAvailable \n\n Indicates if a device type is supported by this backend\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#One","title":"BackendTensorStatics.One","content":"BackendTensorStatics.One \nOne \n\n Gets the scalar 1 tensor for the given device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Ones","title":"BackendTensorStatics.Ones","content":"BackendTensorStatics.Ones \nOnes \n\n Gets a tensor filled with ones for the given shape and device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Random","title":"BackendTensorStatics.Random","content":"BackendTensorStatics.Random \nRandom \n\n Gets a tensor filled with random values for the given shape and 
device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#RandomInt","title":"BackendTensorStatics.RandomInt","content":"BackendTensorStatics.RandomInt \nRandomInt \n\n Gets a tensor filled with random integers from the given range for the given shape and device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#RandomNormal","title":"BackendTensorStatics.RandomNormal","content":"BackendTensorStatics.RandomNormal \nRandomNormal \n\n Gets a tensor filled with random values from the normal distribution for the given shape and device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Seed","title":"BackendTensorStatics.Seed","content":"BackendTensorStatics.Seed \nSeed \n\n Sets the seed for the default random number generator of the backend\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Zero","title":"BackendTensorStatics.Zero","content":"BackendTensorStatics.Zero \nZero \n\n Gets the scalar 0 tensor for the given device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Zeros","title":"BackendTensorStatics.Zeros","content":"BackendTensorStatics.Zeros \nZeros \n\n Gets a tensor filled with zeros for the given shape and device\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Get","title":"BackendTensorStatics.Get","content":"BackendTensorStatics.Get \nGet \n\n Get the backend implementation for the given tensor element type and backend.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-backendtensorstatics.html#Seed","title":"BackendTensorStatics.Seed","content":"BackendTensorStatics.Seed \nSeed \n\n Seed all backends with the given random seed, or a new seed based on the current time\n if no seed is 
specified.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html","title":"RawTensor","content":"RawTensor \n\n Represents a raw (i.e. non-differentiable immutable) tensor implemented by a Furnace backend.\n \n\n Each backend will provide one of more .NET implementations of this type, which may in turn\n wrap handles to native implementations.\n \nRawTensor.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nRawTensor.AbsInPlace \nAbsInPlace \nRawTensor.AbsT \nAbsT \nRawTensor.AcosInPlace \nAcosInPlace \nRawTensor.AcosT \nAcosT \nRawTensor.AddInPlace \nAddInPlace \nRawTensor.AddScalarInPlace \nAddScalarInPlace \nRawTensor.AddSliceInPlace \nAddSliceInPlace \nRawTensor.AddTT \nAddTT \nRawTensor.AddTT0 \nAddTT0 \nRawTensor.AddTTSlice \nAddTTSlice \nRawTensor.AllClose \nAllClose \nRawTensor.AsinInPlace \nAsinInPlace \nRawTensor.AsinT \nAsinT \nRawTensor.AtanInPlace \nAtanInPlace \nRawTensor.AtanT \nAtanT \nRawTensor.AvgPool1D \nAvgPool1D \nRawTensor.AvgPool2D \nAvgPool2D \nRawTensor.AvgPool3D \nAvgPool3D \nRawTensor.AvgPoolReverse1D \nAvgPoolReverse1D \nRawTensor.AvgPoolReverse2D \nAvgPoolReverse2D \nRawTensor.AvgPoolReverse3D \nAvgPoolReverse3D \nRawTensor.BMMTT \nBMMTT \nRawTensor.Cast \nCast \nRawTensor.CatTs \nCatTs \nRawTensor.CeilInPlace \nCeilInPlace \nRawTensor.CeilT \nCeilT \nRawTensor.ClampInPlace \nClampInPlace \nRawTensor.ClampT \nClampT \nRawTensor.Clone \nClone \nRawTensor.ComputeHash \nComputeHash \nRawTensor.Conv1D \nConv1D \nRawTensor.Conv2D \nConv2D \nRawTensor.Conv3D \nConv3D \nRawTensor.CosInPlace \nCosInPlace \nRawTensor.CosT \nCosT \nRawTensor.CoshInPlace \nCoshInPlace \nRawTensor.CoshT \nCoshT \nRawTensor.CreateLike \nCreateLike \nRawTensor.DetT \nDetT \nRawTensor.DilateT \nDilateT \nRawTensor.DivFromT0T \nDivFromT0T \nRawTensor.DivInPlace \nDivInPlace \nRawTensor.DivScalarInPlace \nDivScalarInPlace \nRawTensor.DivTT \nDivTT \nRawTensor.DivTT0 \nDivTT0 \nRawTensor.EmptyLike \nEmptyLike 
\nRawTensor.EqInPlace \nEqInPlace \nRawTensor.EqTT \nEqTT \nRawTensor.Equals \nEquals \nRawTensor.ExpInPlace \nExpInPlace \nRawTensor.ExpT \nExpT \nRawTensor.Expand \nExpand \nRawTensor.FlipT \nFlipT \nRawTensor.FloorInPlace \nFloorInPlace \nRawTensor.FloorT \nFloorT \nRawTensor.FullLike \nFullLike \nRawTensor.GatherT \nGatherT \nRawTensor.GeInPlace \nGeInPlace \nRawTensor.GeTT \nGeTT \nRawTensor.GetItem \nGetItem \nRawTensor.GetSlice \nGetSlice \nRawTensor.GtInPlace \nGtInPlace \nRawTensor.GtTT \nGtTT \nRawTensor.InverseT \nInverseT \nRawTensor.IsInfT \nIsInfT \nRawTensor.IsNaNT \nIsNaNT \nRawTensor.LeInPlace \nLeInPlace \nRawTensor.LeTT \nLeTT \nRawTensor.Log10InPlace \nLog10InPlace \nRawTensor.Log10T \nLog10T \nRawTensor.LogInPlace \nLogInPlace \nRawTensor.LogT \nLogT \nRawTensor.LtInPlace \nLtInPlace \nRawTensor.LtTT \nLtTT \nRawTensor.MatMulInPlace \nMatMulInPlace \nRawTensor.MatMulTT \nMatMulTT \nRawTensor.MaxIndexT \nMaxIndexT \nRawTensor.MaxPool1D \nMaxPool1D \nRawTensor.MaxPool2D \nMaxPool2D \nRawTensor.MaxPool3D \nMaxPool3D \nRawTensor.MaxReduceT \nMaxReduceT \nRawTensor.MaxUnpool1D \nMaxUnpool1D \nRawTensor.MaxUnpool2D \nMaxUnpool2D \nRawTensor.MaxUnpool3D \nMaxUnpool3D \nRawTensor.MinIndexT \nMinIndexT \nRawTensor.MinReduceT \nMinReduceT \nRawTensor.MoveTo \nMoveTo \nRawTensor.MulInPlace \nMulInPlace \nRawTensor.MulScalarInPlace \nMulScalarInPlace \nRawTensor.MulTT \nMulTT \nRawTensor.MulTT0 \nMulTT0 \nRawTensor.NegInPlace \nNegInPlace \nRawTensor.NegT \nNegT \nRawTensor.NeqInPlace \nNeqInPlace \nRawTensor.NeqTT \nNeqTT \nRawTensor.OneLike \nOneLike \nRawTensor.OnesInPlace \nOnesInPlace \nRawTensor.OnesLike \nOnesLike \nRawTensor.PermuteT \nPermuteT \nRawTensor.PowFromT0T \nPowFromT0T \nRawTensor.PowInPlace \nPowInPlace \nRawTensor.PowScalarInPlace \nPowScalarInPlace \nRawTensor.PowTT \nPowTT \nRawTensor.PowTT0 \nPowTT0 \nRawTensor.Print \nPrint \nRawTensor.RandomInPlace \nRandomInPlace \nRawTensor.RandomIntInPlace \nRandomIntInPlace 
\nRawTensor.RandomIntLike \nRandomIntLike \nRawTensor.RandomLike \nRandomLike \nRawTensor.RandomNormalInPlace \nRandomNormalInPlace \nRawTensor.RandomNormalLike \nRandomNormalLike \nRawTensor.ReluInPlace \nReluInPlace \nRawTensor.ReluT \nReluT \nRawTensor.RoundInPlace \nRoundInPlace \nRawTensor.RoundT \nRoundT \nRawTensor.ScatterT \nScatterT \nRawTensor.SetMutable \nSetMutable \nRawTensor.SigmoidInPlace \nSigmoidInPlace \nRawTensor.SigmoidT \nSigmoidT \nRawTensor.SignInPlace \nSignInPlace \nRawTensor.SignT \nSignT \nRawTensor.SinInPlace \nSinInPlace \nRawTensor.SinT \nSinT \nRawTensor.SinhInPlace \nSinhInPlace \nRawTensor.SinhT \nSinhT \nRawTensor.SoftplusInPlace \nSoftplusInPlace \nRawTensor.SoftplusT \nSoftplusT \nRawTensor.SolveTT \nSolveTT \nRawTensor.SplitT \nSplitT \nRawTensor.SqrtInPlace \nSqrtInPlace \nRawTensor.SqrtT \nSqrtT \nRawTensor.SqueezeT \nSqueezeT \nRawTensor.StackTs \nStackTs \nRawTensor.SubFromT0T \nSubFromT0T \nRawTensor.SubInPlace \nSubInPlace \nRawTensor.SubScalarInPlace \nSubScalarInPlace \nRawTensor.SubTT \nSubTT \nRawTensor.SubTT0 \nSubTT0 \nRawTensor.SumT \nSumT \nRawTensor.SumTDim \nSumTDim \nRawTensor.TanInPlace \nTanInPlace \nRawTensor.TanT \nTanT \nRawTensor.TanhInPlace \nTanhInPlace \nRawTensor.TanhT \nTanhT \nRawTensor.ToArray \nToArray \nRawTensor.ToScalar \nToScalar \nRawTensor.ToValues \nToValues \nRawTensor.TransposeT \nTransposeT \nRawTensor.TransposeT2 \nTransposeT2 \nRawTensor.UndilateT \nUndilateT \nRawTensor.UnsqueezeT \nUnsqueezeT \nRawTensor.UnstackT \nUnstackT \nRawTensor.ViewT \nViewT \nRawTensor.ZeroLike \nZeroLike \nRawTensor.ZerosInPlace \nZerosInPlace \nRawTensor.ZerosLike \nZerosLike \nRawTensor.DeviceType \nDeviceType \nRawTensor.Device \nDevice \nRawTensor.Dim \nDim \nRawTensor.Dtype \nDtype \nRawTensor.Nelement \nNelement \nRawTensor.Shape \nShape \nRawTensor.Backend \nBackend \nRawTensor.Handle \nHandle \nRawTensor.IsMutable \nIsMutable \nRawTensor.Create \nCreate \nRawTensor.CreateFromFlatArray 
\nCreateFromFlatArray \nRawTensor.Empty \nEmpty \nRawTensor.Full \nFull \nRawTensor.One \nOne \nRawTensor.Ones \nOnes \nRawTensor.Random \nRandom \nRawTensor.RandomInt \nRandomInt \nRawTensor.RandomNormal \nRandomNormal \nRawTensor.Zero \nZero \nRawTensor.Zeros \nZeros"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#\u0060\u0060.ctor\u0060\u0060","title":"RawTensor.\u0060\u0060.ctor\u0060\u0060","content":"RawTensor.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AbsInPlace","title":"RawTensor.AbsInPlace","content":"RawTensor.AbsInPlace \nAbsInPlace \n\n Modifies the tensor by the element-wise absolute value of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AbsT","title":"RawTensor.AbsT","content":"RawTensor.AbsT \nAbsT \n\n Returns the element-wise absolute value of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AcosInPlace","title":"RawTensor.AcosInPlace","content":"RawTensor.AcosInPlace \nAcosInPlace \n\n Modifies the tensor by the element-wise cos of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AcosT","title":"RawTensor.AcosT","content":"RawTensor.AcosT \nAcosT \n\n Returns the element-wise cos of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AddInPlace","title":"RawTensor.AddInPlace","content":"RawTensor.AddInPlace \nAddInPlace \n\n Modifies the tensor by the element-wise addition of the two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AddScalarInPlace","title":"RawTensor.AddScalarInPlace","content":"RawTensor.AddScalarInPlace \nAddScalarInPlace \n\n Modifies the tensor by the element-wise addition of two 
scalars\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AddSliceInPlace","title":"RawTensor.AddSliceInPlace","content":"RawTensor.AddSliceInPlace \nAddSliceInPlace \n\n Adds a slice of \u003Cc\u003Et2\u003C/c\u003E at the given location to the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AddTT","title":"RawTensor.AddTT","content":"RawTensor.AddTT \nAddTT \n\n Returns the element-wise addition of the two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AddTT0","title":"RawTensor.AddTT0","content":"RawTensor.AddTT0 \nAddTT0 \n\n Returns the element-wise addition of a tensor and a scalar\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AddTTSlice","title":"RawTensor.AddTTSlice","content":"RawTensor.AddTTSlice \nAddTTSlice \n\n Adds a slice of \u003Cc\u003Et2\u003C/c\u003E at the given location to the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AllClose","title":"RawTensor.AllClose","content":"RawTensor.AllClose \nAllClose \n\n Indicates if the two tensors have the same shape and element type, and all corresponding values\n are equal up to the given tolerances.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AsinInPlace","title":"RawTensor.AsinInPlace","content":"RawTensor.AsinInPlace \nAsinInPlace \n\n Modifies the tensor by the element-wise asin of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AsinT","title":"RawTensor.AsinT","content":"RawTensor.AsinT \nAsinT \n\n Returns the element-wise asin of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AtanInPlace","title":"RawTensor.AtanInPlace","content":"RawTensor.AtanInPlace \nAtanInPlace \n\n Modifies the tensor by the element-wise 
atan of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AtanT","title":"RawTensor.AtanT","content":"RawTensor.AtanT \nAtanT \n\n Returns the element-wise atan of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AvgPool1D","title":"RawTensor.AvgPool1D","content":"RawTensor.AvgPool1D \nAvgPool1D \n\n Returns the 1D avgpool of a tensor \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AvgPool2D","title":"RawTensor.AvgPool2D","content":"RawTensor.AvgPool2D \nAvgPool2D \n\n Returns the 2D avgpool of a tensor \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AvgPool3D","title":"RawTensor.AvgPool3D","content":"RawTensor.AvgPool3D \nAvgPool3D \n\n Returns the 2D avgpool of a tensor \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AvgPoolReverse1D","title":"RawTensor.AvgPoolReverse1D","content":"RawTensor.AvgPoolReverse1D \nAvgPoolReverse1D \nReturns the reverse mode of a 1D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input \nThe originalInput parameter is only used for shape information"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AvgPoolReverse2D","title":"RawTensor.AvgPoolReverse2D","content":"RawTensor.AvgPoolReverse2D \nAvgPoolReverse2D \nReturns the reverse mode of a 2D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input \nThe originalInput parameter is only used for shape information"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#AvgPoolReverse3D","title":"RawTensor.AvgPoolReverse3D","content":"RawTensor.AvgPoolReverse3D \nAvgPoolReverse3D \nReturns the reverse mode of a 3D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input 
\nThe originalInput parameter is only used for shape information"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#BMMTT","title":"RawTensor.BMMTT","content":"RawTensor.BMMTT \nBMMTT \n\n Returns the batched matrix multiplication of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Cast","title":"RawTensor.Cast","content":"RawTensor.Cast \nCast \n\n Returns a tensor where the elements have each been cast to the given tensor element storage type.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CatTs","title":"RawTensor.CatTs","content":"RawTensor.CatTs \nCatTs \n\n Concatenate the given tensors along the given dimension\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CeilInPlace","title":"RawTensor.CeilInPlace","content":"RawTensor.CeilInPlace \nCeilInPlace \n\n Modifies the tensor by the element-wise integer ceiling of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CeilT","title":"RawTensor.CeilT","content":"RawTensor.CeilT \nCeilT \n\n Returns the element-wise integer ceiling of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ClampInPlace","title":"RawTensor.ClampInPlace","content":"RawTensor.ClampInPlace \nClampInPlace \n\n Modifies the tensor by with values constrained by the corresponding elements in the low/high tensors.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ClampT","title":"RawTensor.ClampT","content":"RawTensor.ClampT \nClampT \n\n Returns a tensor with values constrained by the corresponding elements in the low/high tensors.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Clone","title":"RawTensor.Clone","content":"RawTensor.Clone \nClone \n\n Clone the underlying storage of the 
tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ComputeHash","title":"RawTensor.ComputeHash","content":"RawTensor.ComputeHash \nComputeHash \n\n Returns a hash of the contents of the tensor. This operation may cause the\n tensor to be moved to the CPU, and its entire contents iterated.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Conv1D","title":"RawTensor.Conv1D","content":"RawTensor.Conv1D \nConv1D \n\n Returns the 1D convolution of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Conv2D","title":"RawTensor.Conv2D","content":"RawTensor.Conv2D \nConv2D \n\n Returns the 2D convolution of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Conv3D","title":"RawTensor.Conv3D","content":"RawTensor.Conv3D \nConv3D \n\n Returns the 3D convolution of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CosInPlace","title":"RawTensor.CosInPlace","content":"RawTensor.CosInPlace \nCosInPlace \n\n Modifies the tensor by the element-wise cosine of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CosT","title":"RawTensor.CosT","content":"RawTensor.CosT \nCosT \n\n Returns the element-wise cosine of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CoshInPlace","title":"RawTensor.CoshInPlace","content":"RawTensor.CoshInPlace \nCoshInPlace \n\n Modifies the tensor by the element-wise cosh of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CoshT","title":"RawTensor.CoshT","content":"RawTensor.CoshT \nCoshT \n\n Returns the element-wise cosh of the 
tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CreateLike","title":"RawTensor.CreateLike","content":"RawTensor.CreateLike \nCreateLike \n\n Gets a tensor filled with values drawn from the given .NET object for the\n given configuration settings, defaulting to the configuration settings of the object tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#DetT","title":"RawTensor.DetT","content":"RawTensor.DetT \nDetT \n\n Returns the determinant of a square matrix\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#DilateT","title":"RawTensor.DilateT","content":"RawTensor.DilateT \nDilateT \n\n Returns the dilation of the tensor using the given dilations parameters\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#DivFromT0T","title":"RawTensor.DivFromT0T","content":"RawTensor.DivFromT0T \nDivFromT0T \n\n Returns the element-wise division of a scalar by a tensor, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#DivInPlace","title":"RawTensor.DivInPlace","content":"RawTensor.DivInPlace \nDivInPlace \n\n Modifies the tensor by the element-wise division of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#DivScalarInPlace","title":"RawTensor.DivScalarInPlace","content":"RawTensor.DivScalarInPlace \nDivScalarInPlace \n\n Modifies the tensor by the element-wise division of a tensor by a scalar, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#DivTT","title":"RawTensor.DivTT","content":"RawTensor.DivTT \nDivTT \n\n Returns the element-wise division of two 
tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#DivTT0","title":"RawTensor.DivTT0","content":"RawTensor.DivTT0 \nDivTT0 \n\n Returns the element-wise division of a tensor by a scalar, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#EmptyLike","title":"RawTensor.EmptyLike","content":"RawTensor.EmptyLike \nEmptyLike \n\n Gets a tensor filled with arbitrary values for the given shape and configuration settings,\n defaulting to the configuration settings of the object tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#EqInPlace","title":"RawTensor.EqInPlace","content":"RawTensor.EqInPlace \nEqInPlace \n\n Modifies the tensor by comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#EqTT","title":"RawTensor.EqTT","content":"RawTensor.EqTT \nEqTT \n\n Returns a boolean tensor comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Equals","title":"RawTensor.Equals","content":"RawTensor.Equals \nEquals \n\n Compare two tensors for equality\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ExpInPlace","title":"RawTensor.ExpInPlace","content":"RawTensor.ExpInPlace \nExpInPlace \n\n Modifies the tensor by the element-wise natural exponentiation of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ExpT","title":"RawTensor.ExpT","content":"RawTensor.ExpT \nExpT \n\n Returns the element-wise natural exponentiation of the 
tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Expand","title":"RawTensor.Expand","content":"RawTensor.Expand \nExpand \n\n Expand the shape of the tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#FlipT","title":"RawTensor.FlipT","content":"RawTensor.FlipT \nFlipT \n\n Returns the flip of the tensor along the given dimensions \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#FloorInPlace","title":"RawTensor.FloorInPlace","content":"RawTensor.FloorInPlace \nFloorInPlace \n\n Modifies the tensor by the element-wise integer floor of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#FloorT","title":"RawTensor.FloorT","content":"RawTensor.FloorT \nFloorT \n\n Returns the element-wise integer floor of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#FullLike","title":"RawTensor.FullLike","content":"RawTensor.FullLike \nFullLike \n\n Gets a tensor filled with the given scalar value for the given shape and configuration settings,\n defaulting to the configuration settings of the object tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#GatherT","title":"RawTensor.GatherT","content":"RawTensor.GatherT \nGatherT \n\n Returns a tensor selecting the given indices from the given dimension and stacking those in the order specified.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#GeInPlace","title":"RawTensor.GeInPlace","content":"RawTensor.GeInPlace \nGeInPlace \n\n Modifies the tensor by comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#GeTT","title":"RawTensor.GeTT","content":"RawTensor.GeTT \nGeTT \n\n Returns 
a boolean tensor comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#GetItem","title":"RawTensor.GetItem","content":"RawTensor.GetItem \nGetItem \n\n Gets a .NET object representing the value of the tensor at the given indexes\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#GetSlice","title":"RawTensor.GetSlice","content":"RawTensor.GetSlice \nGetSlice \n Get a slice of the given tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#GtInPlace","title":"RawTensor.GtInPlace","content":"RawTensor.GtInPlace \nGtInPlace \n\n Modifies the tensor by comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#GtTT","title":"RawTensor.GtTT","content":"RawTensor.GtTT \nGtTT \n\n Returns a boolean tensor comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#InverseT","title":"RawTensor.InverseT","content":"RawTensor.InverseT \nInverseT \n\n Returns the inverse of a single square matrix (2d tensor) or a batch of square matrices (3d tensor)\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#IsInfT","title":"RawTensor.IsInfT","content":"RawTensor.IsInfT \nIsInfT \n\n Returns a boolean tensor where each element indicates if the corresponding element in the tensor is an infinity value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#IsNaNT","title":"RawTensor.IsNaNT","content":"RawTensor.IsNaNT \nIsNaNT \n\n Returns a boolean tensor where each element indicates if the corresponding element in the tensor is a NaN 
value\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#LeInPlace","title":"RawTensor.LeInPlace","content":"RawTensor.LeInPlace \nLeInPlace \n\n Modifies the tensor by comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#LeTT","title":"RawTensor.LeTT","content":"RawTensor.LeTT \nLeTT \n\n Returns a boolean tensor comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Log10InPlace","title":"RawTensor.Log10InPlace","content":"RawTensor.Log10InPlace \nLog10InPlace \n\n Modifies the tensor by the element-wise base10 logarithm of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Log10T","title":"RawTensor.Log10T","content":"RawTensor.Log10T \nLog10T \n\n Returns the element-wise base10 logarithm of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#LogInPlace","title":"RawTensor.LogInPlace","content":"RawTensor.LogInPlace \nLogInPlace \n\n Modifies the tensor by the element-wise natural logarithm of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#LogT","title":"RawTensor.LogT","content":"RawTensor.LogT \nLogT \n\n Returns the element-wise natural logarithm of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#LtInPlace","title":"RawTensor.LtInPlace","content":"RawTensor.LtInPlace \nLtInPlace \n\n Modifies the tensor by comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#LtTT","title":"RawTensor.LtTT","content":"RawTensor.LtTT \nLtTT 
\n\n Returns a boolean tensor comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MatMulInPlace","title":"RawTensor.MatMulInPlace","content":"RawTensor.MatMulInPlace \nMatMulInPlace \n\n Modifies the tensor by the matrix multiplication of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MatMulTT","title":"RawTensor.MatMulTT","content":"RawTensor.MatMulTT \nMatMulTT \n\n Returns the matrix multiplication of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MaxIndexT","title":"RawTensor.MaxIndexT","content":"RawTensor.MaxIndexT \nMaxIndexT \n\n Gets the index of a maximum value of the tensor \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MaxPool1D","title":"RawTensor.MaxPool1D","content":"RawTensor.MaxPool1D \nMaxPool1D \n\n Returns the 1D maxpool of a tensor and its chosen maximum indices\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MaxPool2D","title":"RawTensor.MaxPool2D","content":"RawTensor.MaxPool2D \nMaxPool2D \n\n Returns the 2D maxpool of a tensor and its chosen maximum indices\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MaxPool3D","title":"RawTensor.MaxPool3D","content":"RawTensor.MaxPool3D \nMaxPool3D \n\n Returns the 3D maxpool of a tensor and its chosen maximum indices\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MaxReduceT","title":"RawTensor.MaxReduceT","content":"RawTensor.MaxReduceT \nMaxReduceT \n\n Gets a tensor containing values and indexes of a maximum value of the tensor reducing along the given 
dimension\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MaxUnpool1D","title":"RawTensor.MaxUnpool1D","content":"RawTensor.MaxUnpool1D \nMaxUnpool1D \n\n Returns the 1D maxunpool of a tensor using the given indices for locations of maximums\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MaxUnpool2D","title":"RawTensor.MaxUnpool2D","content":"RawTensor.MaxUnpool2D \nMaxUnpool2D \n\n Returns the 2D maxunpool of a tensor using the given indices for locations of maximums\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MaxUnpool3D","title":"RawTensor.MaxUnpool3D","content":"RawTensor.MaxUnpool3D \nMaxUnpool3D \n\n Returns the 3D maxunpool of a tensor using the given indices for locations of maximums\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MinIndexT","title":"RawTensor.MinIndexT","content":"RawTensor.MinIndexT \nMinIndexT \n\n Gets the index of a minimum value of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MinReduceT","title":"RawTensor.MinReduceT","content":"RawTensor.MinReduceT \nMinReduceT \n\n Gets a tensor containing values and indexes of a minimum value of the tensor reducing along the given dimension\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MoveTo","title":"RawTensor.MoveTo","content":"RawTensor.MoveTo \nMoveTo \n\n Returns a tensor moved to the given device.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MulInPlace","title":"RawTensor.MulInPlace","content":"RawTensor.MulInPlace \nMulInPlace \n\n Modifies the tensor by the element-wise multiplication of two 
tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MulScalarInPlace","title":"RawTensor.MulScalarInPlace","content":"RawTensor.MulScalarInPlace \nMulScalarInPlace \n\n Modifies the tensor by the element-wise multiplication of a tensor and a scalar, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MulTT","title":"RawTensor.MulTT","content":"RawTensor.MulTT \nMulTT \n\n Returns the element-wise multiplication of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#MulTT0","title":"RawTensor.MulTT0","content":"RawTensor.MulTT0 \nMulTT0 \n\n Returns the element-wise multiplication of a tensor and a scalar, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#NegInPlace","title":"RawTensor.NegInPlace","content":"RawTensor.NegInPlace \nNegInPlace \n\n Modifies the tensor by the element-wise negation of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#NegT","title":"RawTensor.NegT","content":"RawTensor.NegT \nNegT \n\n Returns the element-wise negation of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#NeqInPlace","title":"RawTensor.NeqInPlace","content":"RawTensor.NeqInPlace \nNeqInPlace \n\n Modifies the tensor by comparing each element pairwise with the corresponding element in \u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#NeqTT","title":"RawTensor.NeqTT","content":"RawTensor.NeqTT \nNeqTT \n\n Returns a boolean tensor comparing each element pairwise with the corresponding element in 
\u003Cc\u003Et2\u003C/c\u003E\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#OneLike","title":"RawTensor.OneLike","content":"RawTensor.OneLike \nOneLike \n\n Gets a scalar one tensor for the given configuration settings, defaulting to the configuration settings of the object tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#OnesInPlace","title":"RawTensor.OnesInPlace","content":"RawTensor.OnesInPlace \nOnesInPlace \n\n Modifies the tensor by setting all values to one\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#OnesLike","title":"RawTensor.OnesLike","content":"RawTensor.OnesLike \nOnesLike \n\n Gets a tensor filled with one values for the given shape and configuration settings,\n defaulting to the configuration settings of the object tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#PermuteT","title":"RawTensor.PermuteT","content":"RawTensor.PermuteT \nPermuteT \n\n Returns a view of the original tensor with its dimensions permuted\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#PowFromT0T","title":"RawTensor.PowFromT0T","content":"RawTensor.PowFromT0T \nPowFromT0T \n\n Returns the element-wise exponentiation of a scalar and a tensor, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#PowInPlace","title":"RawTensor.PowInPlace","content":"RawTensor.PowInPlace \nPowInPlace \n\n Modifies the tensor by the element-wise exponentiation of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#PowScalarInPlace","title":"RawTensor.PowScalarInPlace","content":"RawTensor.PowScalarInPlace \nPowScalarInPlace \n\n Modifies the tensor by the element-wise exponentiation of a tensor and a scalar, 
where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#PowTT","title":"RawTensor.PowTT","content":"RawTensor.PowTT \nPowTT \n\n Returns the element-wise exponentiation of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#PowTT0","title":"RawTensor.PowTT0","content":"RawTensor.PowTT0 \nPowTT0 \n\n Returns the element-wise exponentiation of a tensor and a scalar, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Print","title":"RawTensor.Print","content":"RawTensor.Print \nPrint \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RandomInPlace","title":"RawTensor.RandomInPlace","content":"RawTensor.RandomInPlace \nRandomInPlace \n\n Modifies the tensor by setting it to random values taken from a uniform distribution in [0, 1).\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RandomIntInPlace","title":"RawTensor.RandomIntInPlace","content":"RawTensor.RandomIntInPlace \nRandomIntInPlace \n\n Gets a tensor filled with random integers from the given range \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RandomIntLike","title":"RawTensor.RandomIntLike","content":"RawTensor.RandomIntLike \nRandomIntLike \n\n Gets a tensor filled with random integer values from the given range for the given shape and configuration settings,\n defaulting to the configuration settings of the object tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RandomLike","title":"RawTensor.RandomLike","content":"RawTensor.RandomLike \nRandomLike \n\n Gets a tensor filled with random values for the given shape and configuration settings,\n defaulting to the 
configuration settings of the object tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RandomNormalInPlace","title":"RawTensor.RandomNormalInPlace","content":"RawTensor.RandomNormalInPlace \nRandomNormalInPlace \n\n Modifies the tensor by setting all values taken from a normal distribution with mean 0 and variance 1.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RandomNormalLike","title":"RawTensor.RandomNormalLike","content":"RawTensor.RandomNormalLike \nRandomNormalLike \n\n Gets a tensor filled with random values from a normal distribution for the given shape and configuration settings,\n defaulting to the configuration settings of the object tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ReluInPlace","title":"RawTensor.ReluInPlace","content":"RawTensor.ReluInPlace \nReluInPlace \n\n Modifies the tensor by the element-wise ReLU of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ReluT","title":"RawTensor.ReluT","content":"RawTensor.ReluT \nReluT \n\n Returns the element-wise ReLU of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RoundInPlace","title":"RawTensor.RoundInPlace","content":"RawTensor.RoundInPlace \nRoundInPlace \n\n Modifies the tensor by the element-wise rounding of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RoundT","title":"RawTensor.RoundT","content":"RawTensor.RoundT \nRoundT \n\n Returns the element-wise rounding of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ScatterT","title":"RawTensor.ScatterT","content":"RawTensor.ScatterT \nScatterT \n\n Returns a tensor with given destination shape where values are copied from the current tensor to locations specified by the dimension and 
indices.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SetMutable","title":"RawTensor.SetMutable","content":"RawTensor.SetMutable \nSetMutable \n\n A backdoor to switch this tensor to be usable as a mutable tensor. You should have a unique handle to\n this tensor for the entire time it is being used as a mutable tensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SigmoidInPlace","title":"RawTensor.SigmoidInPlace","content":"RawTensor.SigmoidInPlace \nSigmoidInPlace \n\n Modifies the tensor by the element-wise sigmoid of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SigmoidT","title":"RawTensor.SigmoidT","content":"RawTensor.SigmoidT \nSigmoidT \n\n Returns the element-wise sigmoid of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SignInPlace","title":"RawTensor.SignInPlace","content":"RawTensor.SignInPlace \nSignInPlace \n\n Modifies the tensor by the element-wise sign of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SignT","title":"RawTensor.SignT","content":"RawTensor.SignT \nSignT \n\n Returns the element-wise sign of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SinInPlace","title":"RawTensor.SinInPlace","content":"RawTensor.SinInPlace \nSinInPlace \n\n Modifies the tensor by the element-wise sine of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SinT","title":"RawTensor.SinT","content":"RawTensor.SinT \nSinT \n\n Returns the element-wise sine of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SinhInPlace","title":"RawTensor.SinhInPlace","content":"RawTensor.SinhInPlace \nSinhInPlace \n\n Modifies the tensor by the element-wise sinh of the 
tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SinhT","title":"RawTensor.SinhT","content":"RawTensor.SinhT \nSinhT \n\n Returns the element-wise sinh of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SoftplusInPlace","title":"RawTensor.SoftplusInPlace","content":"RawTensor.SoftplusInPlace \nSoftplusInPlace \n\n Modifies the tensor by the element-wise softplus of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SoftplusT","title":"RawTensor.SoftplusT","content":"RawTensor.SoftplusT \nSoftplusT \n\n Returns the element-wise softplus of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SolveTT","title":"RawTensor.SolveTT","content":"RawTensor.SolveTT \nSolveTT \n\n Returns the solution of single a square system of linear equations with a unique solution or a batch of several such systems\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SplitT","title":"RawTensor.SplitT","content":"RawTensor.SplitT \nSplitT \n\n Split the given tensors along the given dimensions\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SqrtInPlace","title":"RawTensor.SqrtInPlace","content":"RawTensor.SqrtInPlace \nSqrtInPlace \n\n Modifies the tensor by the element-wise square root of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SqrtT","title":"RawTensor.SqrtT","content":"RawTensor.SqrtT \nSqrtT \n\n Returns the element-wise square root of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SqueezeT","title":"RawTensor.SqueezeT","content":"RawTensor.SqueezeT \nSqueezeT \n\n Returns the tensor with the same values and the given dimension removed. 
The given dimension must be of size 1.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#StackTs","title":"RawTensor.StackTs","content":"RawTensor.StackTs \nStackTs \n\n Stack the given tensors along the given dimension\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SubFromT0T","title":"RawTensor.SubFromT0T","content":"RawTensor.SubFromT0T \nSubFromT0T \n\n Returns the element-wise subtraction of the scalar and a tensor, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SubInPlace","title":"RawTensor.SubInPlace","content":"RawTensor.SubInPlace \nSubInPlace \n\n Modifies the tensor by the element-wise subtraction of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SubScalarInPlace","title":"RawTensor.SubScalarInPlace","content":"RawTensor.SubScalarInPlace \nSubScalarInPlace \n\n Modifies the tensor by the element-wise subtraction of the tensor and a scalar, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SubTT","title":"RawTensor.SubTT","content":"RawTensor.SubTT \nSubTT \n\n Returns the element-wise subtraction of two tensors\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SubTT0","title":"RawTensor.SubTT0","content":"RawTensor.SubTT0 \nSubTT0 \n\n Returns the element-wise subtraction of the tensor and a scalar, where the scalar is logically\n broadcast to the same shape as the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SumT","title":"RawTensor.SumT","content":"RawTensor.SumT \nSumT \n\n Returns the scalar tensor for the summation of all elements in the tensor 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#SumTDim","title":"RawTensor.SumTDim","content":"RawTensor.SumTDim \nSumTDim \n\n Returns the tensor representing the summation of the tensor along the given dimension\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#TanInPlace","title":"RawTensor.TanInPlace","content":"RawTensor.TanInPlace \nTanInPlace \n\n Modifies the tensor by the element-wise tangent of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#TanT","title":"RawTensor.TanT","content":"RawTensor.TanT \nTanT \n\n Returns the element-wise tangent of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#TanhInPlace","title":"RawTensor.TanhInPlace","content":"RawTensor.TanhInPlace \nTanhInPlace \n\n Modifies the tensor by the element-wise tanh of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#TanhT","title":"RawTensor.TanhT","content":"RawTensor.TanhT \nTanhT \n\n Returns the element-wise tanh of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ToArray","title":"RawTensor.ToArray","content":"RawTensor.ToArray \nToArray \n\n Returns a .NET array object for the values of a non-scalar tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ToScalar","title":"RawTensor.ToScalar","content":"RawTensor.ToScalar \nToScalar \n\n Gets a .NET object representing the value of a scalar tensor \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ToValues","title":"RawTensor.ToValues","content":"RawTensor.ToValues \nToValues \nGet a .NET object for all the values in the tensor. 
\nThe runtime type of the returned object is either a .NET scalar\n or array corresponding to the shape and element type of the tensor."},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#TransposeT","title":"RawTensor.TransposeT","content":"RawTensor.TransposeT \nTransposeT \n\n Returns the transpose of the tensor between the given dimensions\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#TransposeT2","title":"RawTensor.TransposeT2","content":"RawTensor.TransposeT2 \nTransposeT2 \n\n Returns the transpose of a 2D tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#UndilateT","title":"RawTensor.UndilateT","content":"RawTensor.UndilateT \nUndilateT \n\n Returns the reverse of the dilation of the tensor using the given dilations parameters\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#UnsqueezeT","title":"RawTensor.UnsqueezeT","content":"RawTensor.UnsqueezeT \nUnsqueezeT \n\n Returns the tensor with the same values and a dimension of size 1 inserted before the given dimension.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#UnstackT","title":"RawTensor.UnstackT","content":"RawTensor.UnstackT \nUnstackT \n\n Unstack the given tensors along the given dimension\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ViewT","title":"RawTensor.ViewT","content":"RawTensor.ViewT \nViewT \n\n Returns the tensor with the same values viewed as a different shape\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ZeroLike","title":"RawTensor.ZeroLike","content":"RawTensor.ZeroLike \nZeroLike \n\n Gets a zero tensor for the given configuration settings, defaulting to the configuration settings of the object 
tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ZerosInPlace","title":"RawTensor.ZerosInPlace","content":"RawTensor.ZerosInPlace \nZerosInPlace \n\n Modifies the tensor by setting all values to zero\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#ZerosLike","title":"RawTensor.ZerosLike","content":"RawTensor.ZerosLike \nZerosLike \n\n Gets a tensor filled with zero values for the given shape and configuration settings,\n defaulting to the configuration settings of the object tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#DeviceType","title":"RawTensor.DeviceType","content":"RawTensor.DeviceType \nDeviceType \n\n Gets the device type for the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Device","title":"RawTensor.Device","content":"RawTensor.Device \nDevice \n\n Gets the device for the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Dim","title":"RawTensor.Dim","content":"RawTensor.Dim \nDim \n\n Gets the dimensionality of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Dtype","title":"RawTensor.Dtype","content":"RawTensor.Dtype \nDtype \n\n Gets the element storage type for the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Nelement","title":"RawTensor.Nelement","content":"RawTensor.Nelement \nNelement \n\n Gets the number of elements in the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Shape","title":"RawTensor.Shape","content":"RawTensor.Shape \nShape \n\n Gets the shape of the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Backend","title":"RawTensor.Backend","content":"RawTensor.Backend \nBackend \n\n Gets the 
backend for the tensor\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Handle","title":"RawTensor.Handle","content":"RawTensor.Handle \nHandle \n\n Gets a handle to the underlying representation of the the tensor. For example, if the Torch\n backend is used this will be the corresponding TorchSharp TorchTensor.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#IsMutable","title":"RawTensor.IsMutable","content":"RawTensor.IsMutable \nIsMutable \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Create","title":"RawTensor.Create","content":"RawTensor.Create \nCreate \n\n Gets a tensor filled with values drawn from the given .NET object.\n \n\n The value may be a scalar, an array, or an array of tupled objects. If the \u003Ccode\u003Edtype\u003C/code\u003E is not specified\n then it is inferred from the .NET type of the object.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#CreateFromFlatArray","title":"RawTensor.CreateFromFlatArray","content":"RawTensor.CreateFromFlatArray \nCreateFromFlatArray \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Empty","title":"RawTensor.Empty","content":"RawTensor.Empty \nEmpty \n\n Gets a tensor containing arbitrary values for the given shape and configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Full","title":"RawTensor.Full","content":"RawTensor.Full \nFull \n\n Gets a tensor filled with the given value for the given shape and configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#One","title":"RawTensor.One","content":"RawTensor.One \nOne \n\n Gets the scalar 1 tensor for the given 
configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Ones","title":"RawTensor.Ones","content":"RawTensor.Ones \nOnes \n\n Gets a tensor filled with 1 values for the given shape and configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Random","title":"RawTensor.Random","content":"RawTensor.Random \nRandom \n\n Gets a tensor filled with random values for the given shape and configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RandomInt","title":"RawTensor.RandomInt","content":"RawTensor.RandomInt \nRandomInt \n\n Gets a tensor filled with random integer values from the given range for the given shape and configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#RandomNormal","title":"RawTensor.RandomNormal","content":"RawTensor.RandomNormal \nRandomNormal \n\n Gets a tensor filled with random values from the normal distribution for the given shape and configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Zero","title":"RawTensor.Zero","content":"RawTensor.Zero \nZero \n\n Gets the scalar zero tensor for the given configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-backends-rawtensor.html#Zeros","title":"RawTensor.Zeros","content":"RawTensor.Zeros \nZeros \n\n Gets the zero tensor for the given shape and configuration\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataloader.html","title":"DataLoader","content":"DataLoader \n \nDataLoader.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nDataLoader.batch \nbatch \nDataLoader.epoch \nepoch \nDataLoader.length 
\nlength"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataloader.html#\u0060\u0060.ctor\u0060\u0060","title":"DataLoader.\u0060\u0060.ctor\u0060\u0060","content":"DataLoader.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataloader.html#batch","title":"DataLoader.batch","content":"DataLoader.batch \nbatch \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataloader.html#epoch","title":"DataLoader.epoch","content":"DataLoader.epoch \nepoch \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataloader.html#length","title":"DataLoader.length","content":"DataLoader.length \nlength \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html","title":"Dataset","content":"Dataset \nRepresents a dataset. \nDataset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nDataset.GetSlice \nGetSlice \nDataset.filter \nfilter \nDataset.item \nitem \nDataset.loader \nloader \nDataset.length \nlength \nDataset.Item \nItem"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html#\u0060\u0060.ctor\u0060\u0060","title":"Dataset.\u0060\u0060.ctor\u0060\u0060","content":"Dataset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html#GetSlice","title":"Dataset.GetSlice","content":"Dataset.GetSlice \nGetSlice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html#filter","title":"Dataset.filter","content":"Dataset.filter \nfilter \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html#item","title":"Dataset.item","content":"Dataset.item \nitem \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html#loader","title":"Dataset.loader","content":"Dataset.loader \nloader 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html#length","title":"Dataset.length","content":"Dataset.length \nlength \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html#Item","title":"Dataset.Item","content":"Dataset.Item \nItem \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-datasetsubset.html","title":"DatasetSubset","content":"DatasetSubset \n \nDatasetSubset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-datasetsubset.html#\u0060\u0060.ctor\u0060\u0060","title":"DatasetSubset.\u0060\u0060.ctor\u0060\u0060","content":"DatasetSubset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-tensordataset.html","title":"TensorDataset","content":"TensorDataset \n \nTensorDataset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-tensordataset.html#\u0060\u0060.ctor\u0060\u0060","title":"TensorDataset.\u0060\u0060.ctor\u0060\u0060","content":"TensorDataset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-textdataset.html","title":"TextDataset","content":"TextDataset \n \nTextDataset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nTextDataset.charToIndex \ncharToIndex \nTextDataset.indexToChar \nindexToChar \nTextDataset.tensorToText \ntensorToText \nTextDataset.textToTensor \ntextToTensor \nTextDataset.chars \nchars \nTextDataset.numChars \nnumChars"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-textdataset.html#\u0060\u0060.ctor\u0060\u0060","title":"TextDataset.\u0060\u0060.ctor\u0060\u0060","content":"TextDataset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-textdataset.html#charToIndex","title":"TextDataset.charToIndex","content":"TextDataset.charToIndex \ncharToIndex \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-textdataset.html#indexToChar","title":"TextDataset.indexToChar","content":"TextDataset.indexToChar \nindexToChar \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-textdataset.html#tensorToText","title":"TextDataset.tensorToText","content":"TextDataset.tensorToText \ntensorToText \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-textdataset.html#textToTensor","title":"TextDataset.textToTensor","content":"TextDataset.textToTensor \ntextToTensor \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-textdataset.html#chars","title":"TextDataset.chars","content":"TextDataset.chars \nchars \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-textdataset.html#numChars","title":"TextDataset.numChars","content":"TextDataset.numChars \nnumChars \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-datautil.html","title":"DataUtil","content":"DataUtil \n\n Contains auto-opened utilities related to the Furnace programming model.\n \nDataUtil.download \ndownload \nDataUtil.extractTarStream \nextractTarStream \nDataUtil.extractTarGz \nextractTarGz"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-datautil.html#download","title":"DataUtil.download","content":"DataUtil.download \ndownload \n\n Synchronously downloads the given URL to the given local file.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-datautil.html#extractTarStream","title":"DataUtil.extractTarStream","content":"DataUtil.extractTarStream \nextractTarStream 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-datautil.html#extractTarGz","title":"DataUtil.extractTarGz","content":"DataUtil.extractTarGz \nextractTarGz \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-cifar10.html","title":"CIFAR10","content":"CIFAR10 \n \nCIFAR10.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nCIFAR10.classes \nclasses \nCIFAR10.classNames \nclassNames"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-cifar10.html#\u0060\u0060.ctor\u0060\u0060","title":"CIFAR10.\u0060\u0060.ctor\u0060\u0060","content":"CIFAR10.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-cifar10.html#classes","title":"CIFAR10.classes","content":"CIFAR10.classes \nclasses \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-cifar10.html#classNames","title":"CIFAR10.classNames","content":"CIFAR10.classNames \nclassNames \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-cifar100.html","title":"CIFAR100","content":"CIFAR100 \n \nCIFAR100.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nCIFAR100.classes \nclasses \nCIFAR100.classNames \nclassNames"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-cifar100.html#\u0060\u0060.ctor\u0060\u0060","title":"CIFAR100.\u0060\u0060.ctor\u0060\u0060","content":"CIFAR100.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-cifar100.html#classes","title":"CIFAR100.classes","content":"CIFAR100.classes \nclasses \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-cifar100.html#classNames","title":"CIFAR100.classNames","content":"CIFAR100.classNames \nclassNames \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-imagedataset.html","title":"ImageDataset","content":"ImageDataset 
\n \nImageDataset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nImageDataset.classes \nclasses \nImageDataset.classNames \nclassNames"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-imagedataset.html#\u0060\u0060.ctor\u0060\u0060","title":"ImageDataset.\u0060\u0060.ctor\u0060\u0060","content":"ImageDataset.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-imagedataset.html#classes","title":"ImageDataset.classes","content":"ImageDataset.classes \nclasses \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-imagedataset.html#classNames","title":"ImageDataset.classNames","content":"ImageDataset.classNames \nclassNames \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-mnist.html","title":"MNIST","content":"MNIST \n \nMNIST.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nMNIST.classes \nclasses \nMNIST.classNames \nclassNames"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-mnist.html#\u0060\u0060.ctor\u0060\u0060","title":"MNIST.\u0060\u0060.ctor\u0060\u0060","content":"MNIST.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-mnist.html#classes","title":"MNIST.classes","content":"MNIST.classes \nclasses \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-data-mnist.html#classNames","title":"MNIST.classNames","content":"MNIST.classNames \nclassNames \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-bernoulli.html","title":"Bernoulli","content":"Bernoulli \nRepresents a Bernoulli distribution. 
\nBernoulli.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nBernoulli.probs \nprobs \nBernoulli.logits \nlogits"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-bernoulli.html#\u0060\u0060.ctor\u0060\u0060","title":"Bernoulli.\u0060\u0060.ctor\u0060\u0060","content":"Bernoulli.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-bernoulli.html#probs","title":"Bernoulli.probs","content":"Bernoulli.probs \nprobs \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-bernoulli.html#logits","title":"Bernoulli.logits","content":"Bernoulli.logits \nlogits \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-categorical.html","title":"Categorical","content":"Categorical \nRepresents a Categorial distribution. \nCategorical.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nCategorical.probs \nprobs \nCategorical.logits \nlogits"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-categorical.html#\u0060\u0060.ctor\u0060\u0060","title":"Categorical.\u0060\u0060.ctor\u0060\u0060","content":"Categorical.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-categorical.html#probs","title":"Categorical.probs","content":"Categorical.probs \nprobs \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-categorical.html#logits","title":"Categorical.logits","content":"Categorical.logits \nlogits \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-distribution-1.html","title":"Distribution\u003C\u0027T\u003E","content":"Distribution\u003C\u0027T\u003E \nRepresents a distribution. 
\nDistribution\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nDistribution\u003C\u0027T\u003E.logprob \nlogprob \nDistribution\u003C\u0027T\u003E.sample \nsample"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-distribution-1.html#\u0060\u0060.ctor\u0060\u0060","title":"Distribution\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060","content":"Distribution\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-distribution-1.html#logprob","title":"Distribution\u003C\u0027T\u003E.logprob","content":"Distribution\u003C\u0027T\u003E.logprob \nlogprob \nReturns the log-probability of the distribution"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-distribution-1.html#sample","title":"Distribution\u003C\u0027T\u003E.sample","content":"Distribution\u003C\u0027T\u003E.sample \nsample \nSamples the distribution"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html","title":"Empirical\u003C\u0027T\u003E","content":"Empirical\u003C\u0027T\u003E \nRepresents an Empirical distribution. 
\nEmpirical\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nEmpirical\u003C\u0027T\u003E.GetSlice \nGetSlice \nEmpirical\u003C\u0027T\u003E.combineDuplicates \ncombineDuplicates \nEmpirical\u003C\u0027T\u003E.expectation \nexpectation \nEmpirical\u003C\u0027T\u003E.filter \nfilter \nEmpirical\u003C\u0027T\u003E.map \nmap \nEmpirical\u003C\u0027T\u003E.resample \nresample \nEmpirical\u003C\u0027T\u003E.sample \nsample \nEmpirical\u003C\u0027T\u003E.thin \nthin \nEmpirical\u003C\u0027T\u003E.unweighted \nunweighted \nEmpirical\u003C\u0027T\u003E.weights \nweights \nEmpirical\u003C\u0027T\u003E.isWeighted \nisWeighted \nEmpirical\u003C\u0027T\u003E.Item \nItem \nEmpirical\u003C\u0027T\u003E.effectiveSampleSize \neffectiveSampleSize \nEmpirical\u003C\u0027T\u003E.stddev \nstddev \nEmpirical\u003C\u0027T\u003E.mode \nmode \nEmpirical\u003C\u0027T\u003E.valuesTensor \nvaluesTensor \nEmpirical\u003C\u0027T\u003E.mean \nmean \nEmpirical\u003C\u0027T\u003E.length \nlength \nEmpirical\u003C\u0027T\u003E.min \nmin \nEmpirical\u003C\u0027T\u003E.variance \nvariance \nEmpirical\u003C\u0027T\u003E.values \nvalues \nEmpirical\u003C\u0027T\u003E.max \nmax \nEmpirical\u003C\u0027T\u003E.logWeights \nlogWeights"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#\u0060\u0060.ctor\u0060\u0060","title":"Empirical\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060","content":"Empirical\u003C\u0027T\u003E.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#GetSlice","title":"Empirical\u003C\u0027T\u003E.GetSlice","content":"Empirical\u003C\u0027T\u003E.GetSlice \nGetSlice \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#combineDuplicates","title":"Empirical\u003C\u0027T\u003E.combineDuplicates","content":"Empirical\u003C\u0027T\u003E.combineDuplicates 
\ncombineDuplicates \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#expectation","title":"Empirical\u003C\u0027T\u003E.expectation","content":"Empirical\u003C\u0027T\u003E.expectation \nexpectation \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#filter","title":"Empirical\u003C\u0027T\u003E.filter","content":"Empirical\u003C\u0027T\u003E.filter \nfilter \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#map","title":"Empirical\u003C\u0027T\u003E.map","content":"Empirical\u003C\u0027T\u003E.map \nmap \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#resample","title":"Empirical\u003C\u0027T\u003E.resample","content":"Empirical\u003C\u0027T\u003E.resample \nresample \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#sample","title":"Empirical\u003C\u0027T\u003E.sample","content":"Empirical\u003C\u0027T\u003E.sample \nsample \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#thin","title":"Empirical\u003C\u0027T\u003E.thin","content":"Empirical\u003C\u0027T\u003E.thin \nthin \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#unweighted","title":"Empirical\u003C\u0027T\u003E.unweighted","content":"Empirical\u003C\u0027T\u003E.unweighted \nunweighted \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#weights","title":"Empirical\u003C\u0027T\u003E.weights","content":"Empirical\u003C\u0027T\u003E.weights \nweights \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#isWeighted","title":"Empirical\u003C\u0027T\u003E.isWeighted","content":"Empirical\u003C\u0027T\u003E.isWeighted \nisWeighted 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#Item","title":"Empirical\u003C\u0027T\u003E.Item","content":"Empirical\u003C\u0027T\u003E.Item \nItem \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#effectiveSampleSize","title":"Empirical\u003C\u0027T\u003E.effectiveSampleSize","content":"Empirical\u003C\u0027T\u003E.effectiveSampleSize \neffectiveSampleSize \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#stddev","title":"Empirical\u003C\u0027T\u003E.stddev","content":"Empirical\u003C\u0027T\u003E.stddev \nstddev \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#mode","title":"Empirical\u003C\u0027T\u003E.mode","content":"Empirical\u003C\u0027T\u003E.mode \nmode \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#valuesTensor","title":"Empirical\u003C\u0027T\u003E.valuesTensor","content":"Empirical\u003C\u0027T\u003E.valuesTensor \nvaluesTensor \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#mean","title":"Empirical\u003C\u0027T\u003E.mean","content":"Empirical\u003C\u0027T\u003E.mean \nmean \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#length","title":"Empirical\u003C\u0027T\u003E.length","content":"Empirical\u003C\u0027T\u003E.length \nlength \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#min","title":"Empirical\u003C\u0027T\u003E.min","content":"Empirical\u003C\u0027T\u003E.min \nmin \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#variance","title":"Empirical\u003C\u0027T\u003E.variance","content":"Empirical\u003C\u0027T\u003E.variance \nvariance 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#values","title":"Empirical\u003C\u0027T\u003E.values","content":"Empirical\u003C\u0027T\u003E.values \nvalues \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#max","title":"Empirical\u003C\u0027T\u003E.max","content":"Empirical\u003C\u0027T\u003E.max \nmax \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-empirical-1.html#logWeights","title":"Empirical\u003C\u0027T\u003E.logWeights","content":"Empirical\u003C\u0027T\u003E.logWeights \nlogWeights \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-normal.html","title":"Normal","content":"Normal \nRepresents a normal distribution with the given mean and standard deviation with the mean and standard deviation drawn fom the given tensors. \nNormal.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-normal.html#\u0060\u0060.ctor\u0060\u0060","title":"Normal.\u0060\u0060.ctor\u0060\u0060","content":"Normal.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html","title":"TensorDistribution","content":"TensorDistribution \n \nTensorDistribution.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nTensorDistribution.prob \nprob \nTensorDistribution.sample \nsample \nTensorDistribution.variance \nvariance \nTensorDistribution.stddev \nstddev \nTensorDistribution.batchShape \nbatchShape \nTensorDistribution.eventShape \neventShape \nTensorDistribution.mean 
\nmean"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html#\u0060\u0060.ctor\u0060\u0060","title":"TensorDistribution.\u0060\u0060.ctor\u0060\u0060","content":"TensorDistribution.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html#prob","title":"TensorDistribution.prob","content":"TensorDistribution.prob \nprob \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html#sample","title":"TensorDistribution.sample","content":"TensorDistribution.sample \nsample \nSamples the distribution mutliple times"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html#variance","title":"TensorDistribution.variance","content":"TensorDistribution.variance \nvariance \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html#stddev","title":"TensorDistribution.stddev","content":"TensorDistribution.stddev \nstddev \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html#batchShape","title":"TensorDistribution.batchShape","content":"TensorDistribution.batchShape \nbatchShape \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html#eventShape","title":"TensorDistribution.eventShape","content":"TensorDistribution.eventShape \neventShape \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-tensordistribution.html#mean","title":"TensorDistribution.mean","content":"TensorDistribution.mean \nmean \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-uniform.html","title":"Uniform","content":"Uniform \nRepresents a uniform distribution with low and high values drawn from the given tensors. 
\nUniform.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nUniform.low \nlow \nUniform.high \nhigh \nUniform.range \nrange"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-uniform.html#\u0060\u0060.ctor\u0060\u0060","title":"Uniform.\u0060\u0060.ctor\u0060\u0060","content":"Uniform.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-uniform.html#low","title":"Uniform.low","content":"Uniform.low \nlow \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-uniform.html#high","title":"Uniform.high","content":"Uniform.high \nhigh \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-distributions-uniform.html#range","title":"Uniform.range","content":"Uniform.range \nrange \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html","title":"RecurrentShape","content":"RecurrentShape \n \nRecurrentShape.RNNCell \nRNNCell \nRecurrentShape.RNNCellSequence \nRNNCellSequence \nRecurrentShape.RNNCellWithHidden \nRNNCellWithHidden \nRecurrentShape.RNNCellSequenceWithHidden \nRNNCellSequenceWithHidden \nRecurrentShape.RNN \nRNN \nRecurrentShape.RNNWithHidden \nRNNWithHidden \nRecurrentShape.LSTMCellWithHidden \nLSTMCellWithHidden \nRecurrentShape.LSTMCellSequenceWithHidden \nLSTMCellSequenceWithHidden \nRecurrentShape.LSTMWithHidden \nLSTMWithHidden"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#RNNCell","title":"RecurrentShape.RNNCell","content":"RecurrentShape.RNNCell \nRNNCell \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#RNNCellSequence","title":"RecurrentShape.RNNCellSequence","content":"RecurrentShape.RNNCellSequence \nRNNCellSequence 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#RNNCellWithHidden","title":"RecurrentShape.RNNCellWithHidden","content":"RecurrentShape.RNNCellWithHidden \nRNNCellWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#RNNCellSequenceWithHidden","title":"RecurrentShape.RNNCellSequenceWithHidden","content":"RecurrentShape.RNNCellSequenceWithHidden \nRNNCellSequenceWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#RNN","title":"RecurrentShape.RNN","content":"RecurrentShape.RNN \nRNN \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#RNNWithHidden","title":"RecurrentShape.RNNWithHidden","content":"RecurrentShape.RNNWithHidden \nRNNWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#LSTMCellWithHidden","title":"RecurrentShape.LSTMCellWithHidden","content":"RecurrentShape.LSTMCellWithHidden \nLSTMCellWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#LSTMCellSequenceWithHidden","title":"RecurrentShape.LSTMCellSequenceWithHidden","content":"RecurrentShape.LSTMCellSequenceWithHidden \nLSTMCellSequenceWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-recurrentshape.html#LSTMWithHidden","title":"RecurrentShape.LSTMWithHidden","content":"RecurrentShape.LSTMWithHidden \nLSTMWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm1d.html","title":"BatchNorm1d","content":"BatchNorm1d \nApplies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with optional additional channel dimension) \n\u003Cp class=\u0027fsdocs-para\u0027\u003E\n The mean and standard-deviation are calculated per-dimension over the mini-batches and\n \\(\\gamma\\( and \\(\\beta\\) are learnable parameter vectors of size 
\\(C\\) (where \\(C\\) is the\n input size). By default, the elements of \\(\\gamma\\) are set to 1 and the elements of \n \\(\\beta\\) are set to 0. The standard-deviation is calculated via the biased estimator,\n equivalent to \u003Ccode\u003EFurnaceImage.var(input, unbiased=False)\u003C/code\u003E.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n Also by default, during training this layer keeps running estimates of its computed mean\n and variance, which are then used for normalization during evaluation. The running estimates\n are kept with a default momentum of 0.1.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n If trackRunningStats is set to False, this layer then does not keep running estimates,\n and batch statistics are instead used during evaluation time as well.\n \u003C/p\u003E \nBatchNorm1d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nBatchNorm1d.bias \nbias \nBatchNorm1d.variance \nvariance \nBatchNorm1d.stddev \nstddev \nBatchNorm1d.weight \nweight \nBatchNorm1d.mean \nmean"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm1d.html#\u0060\u0060.ctor\u0060\u0060","title":"BatchNorm1d.\u0060\u0060.ctor\u0060\u0060","content":"BatchNorm1d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm1d.html#bias","title":"BatchNorm1d.bias","content":"BatchNorm1d.bias \nbias \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm1d.html#variance","title":"BatchNorm1d.variance","content":"BatchNorm1d.variance \nvariance \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm1d.html#stddev","title":"BatchNorm1d.stddev","content":"BatchNorm1d.stddev \nstddev \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm1d.html#weight","title":"BatchNorm1d.weight","content":"BatchNorm1d.weight \nweight 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm1d.html#mean","title":"BatchNorm1d.mean","content":"BatchNorm1d.mean \nmean \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm2d.html","title":"BatchNorm2d","content":"BatchNorm2d \nApplies Batch Normalization over a 4D input (a mini-batch of 2D inputs with optional additional channel dimension) \n\u003Cp class=\u0027fsdocs-para\u0027\u003E\n The mean and standard-deviation are calculated per-dimension over the mini-batches and\n \\(\\gamma\\( and \\(\\beta\\) are learnable parameter vectors of size \\(C\\) (where \\(C\\) is the\n input size). By default, the elements of \\(\\gamma\\) are set to 1 and the elements of \n \\(\\beta\\) are set to 0. The standard-deviation is calculated via the biased estimator,\n equivalent to \u003Ccode\u003EFurnaceImage.var(input, unbiased=False)\u003C/code\u003E.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n Also by default, during training this layer keeps running estimates of its computed mean\n and variance, which are then used for normalization during evaluation. 
The running estimates\n are kept with a default momentum of 0.1.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n If trackRunningStats is set to False, this layer then does not keep running estimates,\n and batch statistics are instead used during evaluation time as well.\n \u003C/p\u003E \nBatchNorm2d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nBatchNorm2d.bias \nbias \nBatchNorm2d.variance \nvariance \nBatchNorm2d.stddev \nstddev \nBatchNorm2d.weight \nweight \nBatchNorm2d.mean \nmean"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm2d.html#\u0060\u0060.ctor\u0060\u0060","title":"BatchNorm2d.\u0060\u0060.ctor\u0060\u0060","content":"BatchNorm2d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm2d.html#bias","title":"BatchNorm2d.bias","content":"BatchNorm2d.bias \nbias \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm2d.html#variance","title":"BatchNorm2d.variance","content":"BatchNorm2d.variance \nvariance \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm2d.html#stddev","title":"BatchNorm2d.stddev","content":"BatchNorm2d.stddev \nstddev \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm2d.html#weight","title":"BatchNorm2d.weight","content":"BatchNorm2d.weight \nweight \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm2d.html#mean","title":"BatchNorm2d.mean","content":"BatchNorm2d.mean \nmean \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm3d.html","title":"BatchNorm3d","content":"BatchNorm3d \nApplies Batch Normalization over a 5D input (a mini-batch of 3D inputs with optional additional channel dimension) \n\u003Cp class=\u0027fsdocs-para\u0027\u003E\n The mean and standard-deviation are calculated per-dimension over the 
mini-batches and\n \\(\\gamma\\( and \\(\\beta\\) are learnable parameter vectors of size \\(C\\) (where \\(C\\) is the\n input size). By default, the elements of \\(\\gamma\\) are set to 1 and the elements of \n \\(\\beta\\) are set to 0. The standard-deviation is calculated via the biased estimator,\n equivalent to \u003Ccode\u003EFurnaceImage.var(input, unbiased=False)\u003C/code\u003E.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n Also by default, during training this layer keeps running estimates of its computed mean\n and variance, which are then used for normalization during evaluation. The running estimates\n are kept with a default momentum of 0.1.\n \u003C/p\u003E\u003Cp class=\u0027fsdocs-para\u0027\u003E\n If trackRunningStats is set to False, this layer then does not keep running estimates,\n and batch statistics are instead used during evaluation time as well.\n \u003C/p\u003E \nBatchNorm3d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nBatchNorm3d.bias \nbias \nBatchNorm3d.variance \nvariance \nBatchNorm3d.stddev \nstddev \nBatchNorm3d.weight \nweight \nBatchNorm3d.mean \nmean"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm3d.html#\u0060\u0060.ctor\u0060\u0060","title":"BatchNorm3d.\u0060\u0060.ctor\u0060\u0060","content":"BatchNorm3d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm3d.html#bias","title":"BatchNorm3d.bias","content":"BatchNorm3d.bias \nbias \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm3d.html#variance","title":"BatchNorm3d.variance","content":"BatchNorm3d.variance \nvariance \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm3d.html#stddev","title":"BatchNorm3d.stddev","content":"BatchNorm3d.stddev \nstddev 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm3d.html#weight","title":"BatchNorm3d.weight","content":"BatchNorm3d.weight \nweight \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-batchnorm3d.html#mean","title":"BatchNorm3d.mean","content":"BatchNorm3d.mean \nmean \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv1d.html","title":"Conv1d","content":"Conv1d \nA model that applies a 1D convolution over an input signal composed of several input planes \nConv1d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nConv1d.bias \nbias \nConv1d.weight \nweight"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv1d.html#\u0060\u0060.ctor\u0060\u0060","title":"Conv1d.\u0060\u0060.ctor\u0060\u0060","content":"Conv1d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv1d.html#bias","title":"Conv1d.bias","content":"Conv1d.bias \nbias \nGet or set the bias parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv1d.html#weight","title":"Conv1d.weight","content":"Conv1d.weight \nweight \nGet or set the weight parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv2d.html","title":"Conv2d","content":"Conv2d \nA model that applies a 2D convolution over an input signal composed of several input planes \nConv2d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nConv2d.bias \nbias \nConv2d.weight \nweight"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv2d.html#\u0060\u0060.ctor\u0060\u0060","title":"Conv2d.\u0060\u0060.ctor\u0060\u0060","content":"Conv2d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv2d.html#bias","title":"Conv2d.bias","content":"Conv2d.bias \nbias \nGet or set the bias parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv2d.html#weight","title":"Conv2d.weight","content":"Conv2d.weight \nweight \nGet or set the weight parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv3d.html","title":"Conv3d","content":"Conv3d \nA model that applies a 3D convolution over an input signal composed of several input planes \nConv3d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nConv3d.bias \nbias \nConv3d.weight \nweight"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv3d.html#\u0060\u0060.ctor\u0060\u0060","title":"Conv3d.\u0060\u0060.ctor\u0060\u0060","content":"Conv3d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv3d.html#bias","title":"Conv3d.bias","content":"Conv3d.bias \nbias \nGet or set the bias parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-conv3d.html#weight","title":"Conv3d.weight","content":"Conv3d.weight \nweight \nGet or set the weight parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose1d.html","title":"ConvTranspose1d","content":"ConvTranspose1d \nA model that applies a 1D transposed convolution operator over an input image composed of several input planes. 
\nConvTranspose1d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nConvTranspose1d.bias \nbias \nConvTranspose1d.weight \nweight"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose1d.html#\u0060\u0060.ctor\u0060\u0060","title":"ConvTranspose1d.\u0060\u0060.ctor\u0060\u0060","content":"ConvTranspose1d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose1d.html#bias","title":"ConvTranspose1d.bias","content":"ConvTranspose1d.bias \nbias \nGet or set the bias parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose1d.html#weight","title":"ConvTranspose1d.weight","content":"ConvTranspose1d.weight \nweight \nGet or set the weight parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose2d.html","title":"ConvTranspose2d","content":"ConvTranspose2d \nA model that applies a 2D transposed convolution operator over an input image composed of several input planes. 
\nConvTranspose2d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nConvTranspose2d.bias \nbias \nConvTranspose2d.weight \nweight"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose2d.html#\u0060\u0060.ctor\u0060\u0060","title":"ConvTranspose2d.\u0060\u0060.ctor\u0060\u0060","content":"ConvTranspose2d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose2d.html#bias","title":"ConvTranspose2d.bias","content":"ConvTranspose2d.bias \nbias \nGet or set the bias parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose2d.html#weight","title":"ConvTranspose2d.weight","content":"ConvTranspose2d.weight \nweight \nGet or set the weight parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose3d.html","title":"ConvTranspose3d","content":"ConvTranspose3d \nA model that applies a 3D transposed convolution operator over an input image composed of several input planes. 
\nConvTranspose3d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nConvTranspose3d.bias \nbias \nConvTranspose3d.weight \nweight"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose3d.html#\u0060\u0060.ctor\u0060\u0060","title":"ConvTranspose3d.\u0060\u0060.ctor\u0060\u0060","content":"ConvTranspose3d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose3d.html#bias","title":"ConvTranspose3d.bias","content":"ConvTranspose3d.bias \nbias \nGet or set the bias parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-convtranspose3d.html#weight","title":"ConvTranspose3d.weight","content":"ConvTranspose3d.weight \nweight \nGet or set the weight parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-dropout.html","title":"Dropout","content":"Dropout \nA model which during training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution. \nDropout.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-dropout.html#\u0060\u0060.ctor\u0060\u0060","title":"Dropout.\u0060\u0060.ctor\u0060\u0060","content":"Dropout.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-dropout2d.html","title":"Dropout2d","content":"Dropout2d \nA model which during training, randomly zero out entire channels. Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution. 
\nDropout2d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-dropout2d.html#\u0060\u0060.ctor\u0060\u0060","title":"Dropout2d.\u0060\u0060.ctor\u0060\u0060","content":"Dropout2d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-dropout3d.html","title":"Dropout3d","content":"Dropout3d \nA model which during training, randomly zero out entire channels. Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution. \nDropout3d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-dropout3d.html#\u0060\u0060.ctor\u0060\u0060","title":"Dropout3d.\u0060\u0060.ctor\u0060\u0060","content":"Dropout3d.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstm.html","title":"LSTM","content":"LSTM \nLong short-term memory (LSTM) recurrent neural network. 
\nLSTM.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nLSTM.forwardWithHidden \nforwardWithHidden \nLSTM.newHidden \nnewHidden \nLSTM.hiddenSize \nhiddenSize \nLSTM.inputSize \ninputSize"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstm.html#\u0060\u0060.ctor\u0060\u0060","title":"LSTM.\u0060\u0060.ctor\u0060\u0060","content":"LSTM.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstm.html#forwardWithHidden","title":"LSTM.forwardWithHidden","content":"LSTM.forwardWithHidden \nforwardWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstm.html#newHidden","title":"LSTM.newHidden","content":"LSTM.newHidden \nnewHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstm.html#hiddenSize","title":"LSTM.hiddenSize","content":"LSTM.hiddenSize \nhiddenSize \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstm.html#inputSize","title":"LSTM.inputSize","content":"LSTM.inputSize \ninputSize \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstmcell.html","title":"LSTMCell","content":"LSTMCell \nUnit cell of a long short-term memory (LSTM) recurrent neural network. Prefer using the RNN class instead, which can combine RNNCells in multiple layers. 
\nLSTMCell.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nLSTMCell.forwardSequence \nforwardSequence \nLSTMCell.forwardSequenceWithHidden \nforwardSequenceWithHidden \nLSTMCell.forwardWithHidden \nforwardWithHidden \nLSTMCell.newHidden \nnewHidden \nLSTMCell.hiddenSize \nhiddenSize \nLSTMCell.inputSize \ninputSize"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstmcell.html#\u0060\u0060.ctor\u0060\u0060","title":"LSTMCell.\u0060\u0060.ctor\u0060\u0060","content":"LSTMCell.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstmcell.html#forwardSequence","title":"LSTMCell.forwardSequence","content":"LSTMCell.forwardSequence \nforwardSequence \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstmcell.html#forwardSequenceWithHidden","title":"LSTMCell.forwardSequenceWithHidden","content":"LSTMCell.forwardSequenceWithHidden \nforwardSequenceWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstmcell.html#forwardWithHidden","title":"LSTMCell.forwardWithHidden","content":"LSTMCell.forwardWithHidden \nforwardWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstmcell.html#newHidden","title":"LSTMCell.newHidden","content":"LSTMCell.newHidden \nnewHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstmcell.html#hiddenSize","title":"LSTMCell.hiddenSize","content":"LSTMCell.hiddenSize \nhiddenSize \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-lstmcell.html#inputSize","title":"LSTMCell.inputSize","content":"LSTMCell.inputSize \ninputSize \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-linear.html","title":"Linear","content":"Linear \nA model that applies a linear transformation to the incoming data: \\(y = xA^T \u002B b\\) \nLinear.\u0060\u0060.ctor\u0060\u0060 
\n\u0060\u0060.ctor\u0060\u0060 \nLinear.bias \nbias \nLinear.weight \nweight"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-linear.html#\u0060\u0060.ctor\u0060\u0060","title":"Linear.\u0060\u0060.ctor\u0060\u0060","content":"Linear.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-linear.html#bias","title":"Linear.bias","content":"Linear.bias \nbias \nGet or set the bias parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-linear.html#weight","title":"Linear.weight","content":"Linear.weight \nweight \nGet or set the weight parameter of the model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-mode.html","title":"Mode","content":"Mode \nIndicates the training or evaluation mode for a model. \nMode.Train \nTrain \nMode.Eval \nEval"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-mode.html#Train","title":"Mode.Train","content":"Mode.Train \nTrain \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-mode.html#Eval","title":"Mode.Eval","content":"Mode.Eval \nEval \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html","title":"Model\u003C\u0027In, \u0027Out\u003E","content":"Model\u003C\u0027In, \u0027Out\u003E \nRepresents a model, primarily a collection of named parameters and sub-models and a function governed by them. 
\nModel\u003C\u0027In, \u0027Out\u003E.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nModel\u003C\u0027In, \u0027Out\u003E.asFunction \nasFunction \nModel\u003C\u0027In, \u0027Out\u003E.clone \nclone \nModel\u003C\u0027In, \u0027Out\u003E.forward \nforward \nModel\u003C\u0027In, \u0027Out\u003E.compose \ncompose \nModel\u003C\u0027In, \u0027Out\u003E.(--\u003E) \n(--\u003E) \nModel\u003C\u0027In, \u0027Out\u003E.(--\u003E) \n(--\u003E) \nModel\u003C\u0027In, \u0027Out\u003E.(--\u003E) \n(--\u003E) \nModel\u003C\u0027In, \u0027Out\u003E.(--\u003E) \n(--\u003E)"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#\u0060\u0060.ctor\u0060\u0060","title":"Model\u003C\u0027In, \u0027Out\u003E.\u0060\u0060.ctor\u0060\u0060","content":"Model\u003C\u0027In, \u0027Out\u003E.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#asFunction","title":"Model\u003C\u0027In, \u0027Out\u003E.asFunction","content":"Model\u003C\u0027In, \u0027Out\u003E.asFunction \nasFunction \nUse the model as a function of its parameters and input. 
\n\n The resulting function can be composed with a loss function and differentiated.\n During execution the parameters of the model are temporarily set to the supplied parameters.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#clone","title":"Model\u003C\u0027In, \u0027Out\u003E.clone","content":"Model\u003C\u0027In, \u0027Out\u003E.clone \nclone \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#forward","title":"Model\u003C\u0027In, \u0027Out\u003E.forward","content":"Model\u003C\u0027In, \u0027Out\u003E.forward \nforward \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#compose","title":"Model\u003C\u0027In, \u0027Out\u003E.compose","content":"Model\u003C\u0027In, \u0027Out\u003E.compose \ncompose \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#(--\u003E)","title":"Model\u003C\u0027In, \u0027Out\u003E.(--\u003E)","content":"Model\u003C\u0027In, \u0027Out\u003E.(--\u003E) \n(--\u003E) \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#(--\u003E)","title":"Model\u003C\u0027In, \u0027Out\u003E.(--\u003E)","content":"Model\u003C\u0027In, \u0027Out\u003E.(--\u003E) \n(--\u003E) \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#(--\u003E)","title":"Model\u003C\u0027In, \u0027Out\u003E.(--\u003E)","content":"Model\u003C\u0027In, \u0027Out\u003E.(--\u003E) \n(--\u003E) \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model-2.html#(--\u003E)","title":"Model\u003C\u0027In, \u0027Out\u003E.(--\u003E)","content":"Model\u003C\u0027In, \u0027Out\u003E.(--\u003E) \n(--\u003E) \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-model.html","title":"Model","content":"Model 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html","title":"ModelBase","content":"ModelBase \nRepresents the base class of all models. \nModelBase.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nModelBase.addBuffer \naddBuffer \nModelBase.addBuffer \naddBuffer \nModelBase.addBuffer \naddBuffer \nModelBase.addModel \naddModel \nModelBase.addModel \naddModel \nModelBase.addModel \naddModel \nModelBase.addModel \naddModel \nModelBase.addModel \naddModel \nModelBase.addModel \naddModel \nModelBase.addParameter \naddParameter \nModelBase.addParameter \naddParameter \nModelBase.addParameter \naddParameter \nModelBase.clone \nclone \nModelBase.eval \neval \nModelBase.forwardDiff \nforwardDiff \nModelBase.init \ninit \nModelBase.move \nmove \nModelBase.noDiff \nnoDiff \nModelBase.reverseDiff \nreverseDiff \nModelBase.summary \nsummary \nModelBase.train \ntrain \nModelBase.children \nchildren \nModelBase.nparameters \nnparameters \nModelBase.backend \nbackend \nModelBase.isNoDiff \nisNoDiff \nModelBase.hasOwnBuffers \nhasOwnBuffers \nModelBase.parametersVector \nparametersVector \nModelBase.hasOwnState \nhasOwnState \nModelBase.state \nstate \nModelBase.buffersVector \nbuffersVector \nModelBase.isForwardDiff \nisForwardDiff \nModelBase.device \ndevice \nModelBase.nstate \nnstate \nModelBase.buffers \nbuffers \nModelBase.isReverseDiff \nisReverseDiff \nModelBase.nbuffers \nnbuffers \nModelBase.descendants \ndescendants \nModelBase.dtype \ndtype \nModelBase.parameters \nparameters \nModelBase.stateVector \nstateVector \nModelBase.hasOwnParameters \nhasOwnParameters \nModelBase.mode \nmode"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#\u0060\u0060.ctor\u0060\u0060","title":"ModelBase.\u0060\u0060.ctor\u0060\u0060","content":"ModelBase.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addBuffer","title":"ModelBase.addBuffer","content":"ModelBase.addBuffer \naddBuffer \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addBuffer","title":"ModelBase.addBuffer","content":"ModelBase.addBuffer \naddBuffer \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addBuffer","title":"ModelBase.addBuffer","content":"ModelBase.addBuffer \naddBuffer \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addModel","title":"ModelBase.addModel","content":"ModelBase.addModel \naddModel \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addModel","title":"ModelBase.addModel","content":"ModelBase.addModel \naddModel \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addModel","title":"ModelBase.addModel","content":"ModelBase.addModel \naddModel \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addModel","title":"ModelBase.addModel","content":"ModelBase.addModel \naddModel \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addModel","title":"ModelBase.addModel","content":"ModelBase.addModel \naddModel \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addModel","title":"ModelBase.addModel","content":"ModelBase.addModel \naddModel \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addParameter","title":"ModelBase.addParameter","content":"ModelBase.addParameter \naddParameter \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addParameter","title":"ModelBase.addParameter","content":"ModelBase.addParameter \naddParameter 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#addParameter","title":"ModelBase.addParameter","content":"ModelBase.addParameter \naddParameter \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#clone","title":"ModelBase.clone","content":"ModelBase.clone \nclone \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#eval","title":"ModelBase.eval","content":"ModelBase.eval \neval \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#forwardDiff","title":"ModelBase.forwardDiff","content":"ModelBase.forwardDiff \nforwardDiff \n\n Adjust the parameters of the model to initiate a new level of forward-mode automatic differentiation.\n \n\n After this call the current parameters of the model will have attached derivatives for forward mode differentiation.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#init","title":"ModelBase.init","content":"ModelBase.init \ninit \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#move","title":"ModelBase.move","content":"ModelBase.move \nmove \nMoves the state (parameters and buffers) of the model to the given configuration"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#noDiff","title":"ModelBase.noDiff","content":"ModelBase.noDiff \nnoDiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#reverseDiff","title":"ModelBase.reverseDiff","content":"ModelBase.reverseDiff \nreverseDiff \n\n Adjust the parameters of the model to initiate a new level of reverse-mode automatic differentiation.\n \n\n After this call the current parameters of the model will support reverse-mode differentiation. After the completion\n of the corresponding \u003Ccode\u003Ereverse\u003C/code\u003E operation, the computed derivatives will be available. 
\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#summary","title":"ModelBase.summary","content":"ModelBase.summary \nsummary \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#train","title":"ModelBase.train","content":"ModelBase.train \ntrain \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#children","title":"ModelBase.children","content":"ModelBase.children \nchildren \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#nparameters","title":"ModelBase.nparameters","content":"ModelBase.nparameters \nnparameters \nGets the number of parameters of the Model"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#backend","title":"ModelBase.backend","content":"ModelBase.backend \nbackend \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#isNoDiff","title":"ModelBase.isNoDiff","content":"ModelBase.isNoDiff \nisNoDiff \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#hasOwnBuffers","title":"ModelBase.hasOwnBuffers","content":"ModelBase.hasOwnBuffers \nhasOwnBuffers \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#parametersVector","title":"ModelBase.parametersVector","content":"ModelBase.parametersVector \nparametersVector \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#hasOwnState","title":"ModelBase.hasOwnState","content":"ModelBase.hasOwnState \nhasOwnState \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#state","title":"ModelBase.state","content":"ModelBase.state \nstate \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#buffersVector","title":"ModelBase.buffersVector","content":"ModelBase.buffersVector \nbuffersVector 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#isForwardDiff","title":"ModelBase.isForwardDiff","content":"ModelBase.isForwardDiff \nisForwardDiff \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#device","title":"ModelBase.device","content":"ModelBase.device \ndevice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#nstate","title":"ModelBase.nstate","content":"ModelBase.nstate \nnstate \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#buffers","title":"ModelBase.buffers","content":"ModelBase.buffers \nbuffers \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#isReverseDiff","title":"ModelBase.isReverseDiff","content":"ModelBase.isReverseDiff \nisReverseDiff \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#nbuffers","title":"ModelBase.nbuffers","content":"ModelBase.nbuffers \nnbuffers \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#descendants","title":"ModelBase.descendants","content":"ModelBase.descendants \ndescendants \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#dtype","title":"ModelBase.dtype","content":"ModelBase.dtype \ndtype \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#parameters","title":"ModelBase.parameters","content":"ModelBase.parameters \nparameters \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#stateVector","title":"ModelBase.stateVector","content":"ModelBase.stateVector \nstateVector \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#hasOwnParameters","title":"ModelBase.hasOwnParameters","content":"ModelBase.hasOwnParameters \nhasOwnParameters 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-modelbase.html#mode","title":"ModelBase.mode","content":"ModelBase.mode \nmode \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameter.html","title":"Parameter","content":"Parameter \nRepresents a parameter. \nA parameter is a mutable register holding a tensor. \nParameter.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nParameter.copy \ncopy \nParameter.forwardDiff \nforwardDiff \nParameter.move \nmove \nParameter.noDiff \nnoDiff \nParameter.reverseDiff \nreverseDiff \nParameter.value \nvalue"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameter.html#\u0060\u0060.ctor\u0060\u0060","title":"Parameter.\u0060\u0060.ctor\u0060\u0060","content":"Parameter.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameter.html#copy","title":"Parameter.copy","content":"Parameter.copy \ncopy \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameter.html#forwardDiff","title":"Parameter.forwardDiff","content":"Parameter.forwardDiff \nforwardDiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameter.html#move","title":"Parameter.move","content":"Parameter.move \nmove \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameter.html#noDiff","title":"Parameter.noDiff","content":"Parameter.noDiff \nnoDiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameter.html#reverseDiff","title":"Parameter.reverseDiff","content":"Parameter.reverseDiff \nreverseDiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameter.html#value","title":"Parameter.value","content":"Parameter.value \nvalue 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html","title":"ParameterDict","content":"ParameterDict \nRepresents a collection of named parameters. \nParameterDict.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nParameterDict.add \nadd \nParameterDict.add \nadd \nParameterDict.add \nadd \nParameterDict.clear \nclear \nParameterDict.copy \ncopy \nParameterDict.flatten \nflatten \nParameterDict.forwardDiff \nforwardDiff \nParameterDict.iter \niter \nParameterDict.map \nmap \nParameterDict.map \nmap \nParameterDict.move \nmove \nParameterDict.noDiff \nnoDiff \nParameterDict.reverseDiff \nreverseDiff \nParameterDict.set \nset \nParameterDict.unflatten \nunflatten \nParameterDict.unflattenToNew \nunflattenToNew \nParameterDict.dtype \ndtype \nParameterDict.count \ncount \nParameterDict.device \ndevice \nParameterDict.backend \nbackend \nParameterDict.Item \nItem \nParameterDict.nelement \nnelement"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#\u0060\u0060.ctor\u0060\u0060","title":"ParameterDict.\u0060\u0060.ctor\u0060\u0060","content":"ParameterDict.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#add","title":"ParameterDict.add","content":"ParameterDict.add \nadd \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#add","title":"ParameterDict.add","content":"ParameterDict.add \nadd \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#add","title":"ParameterDict.add","content":"ParameterDict.add \nadd \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#clear","title":"ParameterDict.clear","content":"ParameterDict.clear \nclear 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#copy","title":"ParameterDict.copy","content":"ParameterDict.copy \ncopy \nTBD \n\n This method discards differentiability and returns a ParameterDict containing parameters that are constant tensors.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#flatten","title":"ParameterDict.flatten","content":"ParameterDict.flatten \nflatten \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#forwardDiff","title":"ParameterDict.forwardDiff","content":"ParameterDict.forwardDiff \nforwardDiff \n\n Adjust the parameters to include support for forward-mode automatic differentiation.\n \n\n After this call the current parameters in this dictionary will have attached derivatives for forward mode differentiation.\n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#iter","title":"ParameterDict.iter","content":"ParameterDict.iter \niter \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#map","title":"ParameterDict.map","content":"ParameterDict.map \nmap \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#map","title":"ParameterDict.map","content":"ParameterDict.map \nmap \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#move","title":"ParameterDict.move","content":"ParameterDict.move \nmove \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#noDiff","title":"ParameterDict.noDiff","content":"ParameterDict.noDiff \nnoDiff \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#reverseDiff","title":"ParameterDict.reverseDiff","content":"ParameterDict.reverseDiff \nreverseDiff \n\n Adjust the parameters to include support for reverse-mode automatic 
differentiation.\n \n\n After this call the current parameters in this dictionary will support reverse-mode differentiation. After the completion\n of the corresponding \u003Ccode\u003Ereverse\u003C/code\u003E operation, the computed derivative\n will be available. \n "},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#set","title":"ParameterDict.set","content":"ParameterDict.set \nset \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#unflatten","title":"ParameterDict.unflatten","content":"ParameterDict.unflatten \nunflatten \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#unflattenToNew","title":"ParameterDict.unflattenToNew","content":"ParameterDict.unflattenToNew \nunflattenToNew \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#dtype","title":"ParameterDict.dtype","content":"ParameterDict.dtype \ndtype \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#count","title":"ParameterDict.count","content":"ParameterDict.count \ncount \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#device","title":"ParameterDict.device","content":"ParameterDict.device \ndevice \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#backend","title":"ParameterDict.backend","content":"ParameterDict.backend \nbackend \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#Item","title":"ParameterDict.Item","content":"ParameterDict.Item \nItem \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-parameterdict.html#nelement","title":"ParameterDict.nelement","content":"ParameterDict.nelement \nnelement \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnn.html","title":"RNN","content":"RNN \nRecurrent neural 
network. \nRNN.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nRNN.forwardWithHidden \nforwardWithHidden \nRNN.newHidden \nnewHidden \nRNN.hiddenSize \nhiddenSize \nRNN.inputSize \ninputSize"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnn.html#\u0060\u0060.ctor\u0060\u0060","title":"RNN.\u0060\u0060.ctor\u0060\u0060","content":"RNN.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnn.html#forwardWithHidden","title":"RNN.forwardWithHidden","content":"RNN.forwardWithHidden \nforwardWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnn.html#newHidden","title":"RNN.newHidden","content":"RNN.newHidden \nnewHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnn.html#hiddenSize","title":"RNN.hiddenSize","content":"RNN.hiddenSize \nhiddenSize \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnn.html#inputSize","title":"RNN.inputSize","content":"RNN.inputSize \ninputSize \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnncell.html","title":"RNNCell","content":"RNNCell \nUnit cell of a recurrent neural network. Prefer using the RNN class instead, which can combine RNNCells in multiple layers. 
\nRNNCell.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nRNNCell.forwardSequence \nforwardSequence \nRNNCell.forwardSequenceWithHidden \nforwardSequenceWithHidden \nRNNCell.forwardWithHidden \nforwardWithHidden \nRNNCell.newHidden \nnewHidden \nRNNCell.hiddenSize \nhiddenSize \nRNNCell.inputSize \ninputSize"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnncell.html#\u0060\u0060.ctor\u0060\u0060","title":"RNNCell.\u0060\u0060.ctor\u0060\u0060","content":"RNNCell.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnncell.html#forwardSequence","title":"RNNCell.forwardSequence","content":"RNNCell.forwardSequence \nforwardSequence \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnncell.html#forwardSequenceWithHidden","title":"RNNCell.forwardSequenceWithHidden","content":"RNNCell.forwardSequenceWithHidden \nforwardSequenceWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnncell.html#forwardWithHidden","title":"RNNCell.forwardWithHidden","content":"RNNCell.forwardWithHidden \nforwardWithHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnncell.html#newHidden","title":"RNNCell.newHidden","content":"RNNCell.newHidden \nnewHidden \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnncell.html#hiddenSize","title":"RNNCell.hiddenSize","content":"RNNCell.hiddenSize \nhiddenSize \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-rnncell.html#inputSize","title":"RNNCell.inputSize","content":"RNNCell.inputSize \ninputSize \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-sequential.html","title":"Sequential","content":"Sequential \n \nSequential.\u0060\u0060.ctor\u0060\u0060 
\n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-sequential.html#\u0060\u0060.ctor\u0060\u0060","title":"Sequential.\u0060\u0060.ctor\u0060\u0060","content":"Sequential.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vae.html","title":"VAE","content":"VAE \nVariational auto-encoder \nVAE.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vae.html#\u0060\u0060.ctor\u0060\u0060","title":"VAE.\u0060\u0060.ctor\u0060\u0060","content":"VAE.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaebase.html","title":"VAEBase","content":"VAEBase \nVariational auto-encoder base \nVAEBase.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nVAEBase.decode \ndecode \nVAEBase.encode \nencode \nVAEBase.encodeDecode \nencodeDecode \nVAEBase.loss \nloss \nVAEBase.sample \nsample \nVAEBase.loss \nloss"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaebase.html#\u0060\u0060.ctor\u0060\u0060","title":"VAEBase.\u0060\u0060.ctor\u0060\u0060","content":"VAEBase.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaebase.html#decode","title":"VAEBase.decode","content":"VAEBase.decode \ndecode \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaebase.html#encode","title":"VAEBase.encode","content":"VAEBase.encode \nencode \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaebase.html#encodeDecode","title":"VAEBase.encodeDecode","content":"VAEBase.encodeDecode \nencodeDecode \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaebase.html#loss","title":"VAEBase.loss","content":"VAEBase.loss \nloss 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaebase.html#sample","title":"VAEBase.sample","content":"VAEBase.sample \nsample \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaebase.html#loss","title":"VAEBase.loss","content":"VAEBase.loss \nloss \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaemlp.html","title":"VAEMLP","content":"VAEMLP \nVariational auto-encoder with multilayer perceptron (MLP) encoder and decoder. \nVAEMLP.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-vaemlp.html#\u0060\u0060.ctor\u0060\u0060","title":"VAEMLP.\u0060\u0060.ctor\u0060\u0060","content":"VAEMLP.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-weight.html","title":"Weight","content":"Weight \nContains functionality related to generating initial parameter weights for models. \nWeight.kaiming \nkaiming \nWeight.uniform \nuniform"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-weight.html#kaiming","title":"Weight.kaiming","content":"Weight.kaiming \nkaiming \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-model-weight.html#uniform","title":"Weight.uniform","content":"Weight.uniform \nuniform \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-adam.html","title":"Adam","content":"Adam \nTBD \nAdam.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-adam.html#\u0060\u0060.ctor\u0060\u0060","title":"Adam.\u0060\u0060.ctor\u0060\u0060","content":"Adam.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optimizer.html","title":"Optimizer","content":"Optimizer \nRepresents an optimizer. 
\nOptimizer.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nOptimizer.step \nstep \nOptimizer.updateRule \nupdateRule \nOptimizer.model \nmodel \nOptimizer.stateStep \nstateStep"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optimizer.html#\u0060\u0060.ctor\u0060\u0060","title":"Optimizer.\u0060\u0060.ctor\u0060\u0060","content":"Optimizer.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optimizer.html#step","title":"Optimizer.step","content":"Optimizer.step \nstep \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optimizer.html#updateRule","title":"Optimizer.updateRule","content":"Optimizer.updateRule \nupdateRule \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optimizer.html#model","title":"Optimizer.model","content":"Optimizer.model \nmodel \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optimizer.html#stateStep","title":"Optimizer.stateStep","content":"Optimizer.stateStep \nstateStep \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-sgd.html","title":"SGD","content":"SGD \nTBD \nSGD.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-sgd.html#\u0060\u0060.ctor\u0060\u0060","title":"SGD.\u0060\u0060.ctor\u0060\u0060","content":"SGD.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optim.html","title":"optim","content":"optim \nTBD \noptim.adam \nadam \noptim.adam \nadam \noptim.sgd \nsgd \noptim.sgd \nsgd"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optim.html#adam","title":"optim.adam","content":"optim.adam \nadam 
\nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optim.html#adam","title":"optim.adam","content":"optim.adam \nadam \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optim.html#sgd","title":"optim.sgd","content":"optim.sgd \nsgd \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-optim-optim.html#sgd","title":"optim.sgd","content":"optim.sgd \nsgd \nTBD"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html","title":"Array","content":"Array \n\n Contains extensions to the F# Array module. \n \nArray.allClose \nallClose \nArray.cumulativeSum \ncumulativeSum \nArray.getUniqueCounts \ngetUniqueCounts \nArray.initFlat2D \ninitFlat2D \nArray.initFlat3D \ninitFlat3D \nArray.foralli \nforalli \nArray.insertManyAt \ninsertManyAt \nArray.removeAt \nremoveAt"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html#allClose","title":"Array.allClose","content":"Array.allClose \nallClose \n\n Determines if all values of the first array lie within the given tolerances of the second array.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html#cumulativeSum","title":"Array.cumulativeSum","content":"Array.cumulativeSum \ncumulativeSum \n\n Gets the cumulative sum of the input array.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html#getUniqueCounts","title":"Array.getUniqueCounts","content":"Array.getUniqueCounts \ngetUniqueCounts \n\n Gets the unique counts of the input array.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html#initFlat2D","title":"Array.initFlat2D","content":"Array.initFlat2D \ninitFlat2D \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html#initFlat3D","title":"Array.initFlat3D","content":"Array.initFlat3D \ninitFlat3D 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html#foralli","title":"Array.foralli","content":"Array.foralli \nforalli \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html#insertManyAt","title":"Array.insertManyAt","content":"Array.insertManyAt \ninsertManyAt \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array.html#removeAt","title":"Array.removeAt","content":"Array.removeAt \nremoveAt \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array4d.html","title":"Array4D","content":"Array4D \n \nArray4D.map \nmap"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array4d.html#map","title":"Array4D.map","content":"Array4D.map \nmap \n\n Builds a new array whose elements are the results of applying the given function to each of the elements of the array.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html","title":"Array5D","content":"Array5D \n \nArray5D.zeroCreate \nzeroCreate \nArray5D.get \nget \nArray5D.set \nset \nArray5D.length1 \nlength1 \nArray5D.length2 \nlength2 \nArray5D.length3 \nlength3 \nArray5D.length4 \nlength4 \nArray5D.length5 \nlength5 \nArray5D.init \ninit \nArray5D.create \ncreate \nArray5D.map \nmap"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#zeroCreate","title":"Array5D.zeroCreate","content":"Array5D.zeroCreate \nzeroCreate \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#get","title":"Array5D.get","content":"Array5D.get \nget \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#set","title":"Array5D.set","content":"Array5D.set \nset \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#length1","title":"Array5D.length1","content":"Array5D.length1 \nlength1 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#length2","title":"Array5D.length2","content":"Array5D.length2 \nlength2 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#length3","title":"Array5D.length3","content":"Array5D.length3 \nlength3 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#length4","title":"Array5D.length4","content":"Array5D.length4 \nlength4 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#length5","title":"Array5D.length5","content":"Array5D.length5 \nlength5 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#init","title":"Array5D.init","content":"Array5D.init \ninit \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#create","title":"Array5D.create","content":"Array5D.create \ncreate \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array5d.html#map","title":"Array5D.map","content":"Array5D.map \nmap \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html","title":"Array6D","content":"Array6D \n \nArray6D.zeroCreate \nzeroCreate \nArray6D.get \nget \nArray6D.set \nset \nArray6D.length1 \nlength1 \nArray6D.length2 \nlength2 \nArray6D.length3 \nlength3 \nArray6D.length4 \nlength4 \nArray6D.length5 \nlength5 \nArray6D.length6 \nlength6 \nArray6D.init \ninit \nArray6D.create \ncreate \nArray6D.map \nmap"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#zeroCreate","title":"Array6D.zeroCreate","content":"Array6D.zeroCreate \nzeroCreate \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#get","title":"Array6D.get","content":"Array6D.get \nget \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#set","title":"Array6D.set","content":"Array6D.set \nset 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#length1","title":"Array6D.length1","content":"Array6D.length1 \nlength1 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#length2","title":"Array6D.length2","content":"Array6D.length2 \nlength2 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#length3","title":"Array6D.length3","content":"Array6D.length3 \nlength3 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#length4","title":"Array6D.length4","content":"Array6D.length4 \nlength4 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#length5","title":"Array6D.length5","content":"Array6D.length5 \nlength5 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#length6","title":"Array6D.length6","content":"Array6D.length6 \nlength6 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#init","title":"Array6D.init","content":"Array6D.init \ninit \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#create","title":"Array6D.create","content":"Array6D.create \ncreate \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-array6d.html#map","title":"Array6D.map","content":"Array6D.map \nmap \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-arraynd.html","title":"ArrayND","content":"ArrayND \n \nArrayND.init \ninit \nArrayND.zeroCreate \nzeroCreate"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-arraynd.html#init","title":"ArrayND.init","content":"ArrayND.init \ninit \n\n Initializes an array with a given shape and initializer function.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-arraynd.html#zeroCreate","title":"ArrayND.zeroCreate","content":"ArrayND.zeroCreate \nzeroCreate \n\n Initializes an array with a given 
shape and initializer function.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html","title":"DataConverter","content":"DataConverter \n\n Contains operations relating to converting .NET data to tensor data.\n \nDataConverter.formatType \nformatType \nDataConverter.typesMatch \ntypesMatch \nDataConverter.tryFlatArrayAndShape \ntryFlatArrayAndShape \nDataConverter.dataOfValues \ndataOfValues \nDataConverter.dataOfValuesForFloat32 \ndataOfValuesForFloat32 \nDataConverter.dataOfValuesForFloat64 \ndataOfValuesForFloat64 \nDataConverter.dataOfValuesForByte \ndataOfValuesForByte \nDataConverter.dataOfValuesForInt8 \ndataOfValuesForInt8 \nDataConverter.dataOfValuesForInt16 \ndataOfValuesForInt16 \nDataConverter.dataOfValuesForInt32 \ndataOfValuesForInt32 \nDataConverter.dataOfValuesForInt64 \ndataOfValuesForInt64 \nDataConverter.dataOfValuesForBool \ndataOfValuesForBool"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#formatType","title":"DataConverter.formatType","content":"DataConverter.formatType \nformatType \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#typesMatch","title":"DataConverter.typesMatch","content":"DataConverter.typesMatch \ntypesMatch \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#tryFlatArrayAndShape","title":"DataConverter.tryFlatArrayAndShape","content":"DataConverter.tryFlatArrayAndShape \ntryFlatArrayAndShape \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValues","title":"DataConverter.dataOfValues","content":"DataConverter.dataOfValues \ndataOfValues \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValuesForFloat32","title":"DataConverter.dataOfValuesForFloat32","content":"DataConverter.dataOfValuesForFloat32 \ndataOfValuesForFloat32 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValuesForFloat64","title":"DataConverter.dataOfValuesForFloat64","content":"DataConverter.dataOfValuesForFloat64 \ndataOfValuesForFloat64 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValuesForByte","title":"DataConverter.dataOfValuesForByte","content":"DataConverter.dataOfValuesForByte \ndataOfValuesForByte \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValuesForInt8","title":"DataConverter.dataOfValuesForInt8","content":"DataConverter.dataOfValuesForInt8 \ndataOfValuesForInt8 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValuesForInt16","title":"DataConverter.dataOfValuesForInt16","content":"DataConverter.dataOfValuesForInt16 \ndataOfValuesForInt16 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValuesForInt32","title":"DataConverter.dataOfValuesForInt32","content":"DataConverter.dataOfValuesForInt32 \ndataOfValuesForInt32 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValuesForInt64","title":"DataConverter.dataOfValuesForInt64","content":"DataConverter.dataOfValuesForInt64 \ndataOfValuesForInt64 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dataconverter.html#dataOfValuesForBool","title":"DataConverter.dataOfValuesForBool","content":"DataConverter.dataOfValuesForBool \ndataOfValuesForBool \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dictionary.html","title":"Dictionary","content":"Dictionary \n\n Contains extensions related to .NET Dictionary. 
\n \nDictionary.copyKeys \ncopyKeys \nDictionary.copyValues \ncopyValues"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dictionary.html#copyKeys","title":"Dictionary.copyKeys","content":"Dictionary.copyKeys \ncopyKeys \n\n Gets a fresh array containing the keys of the dictionary.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-dictionary.html#copyValues","title":"Dictionary.copyValues","content":"Dictionary.copyValues \ncopyValues \n\n Gets a fresh array containing the values of the dictionary.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-extensionautoopens.html","title":"ExtensionAutoOpens","content":"ExtensionAutoOpens \n\n Contains auto-opened extensions to the F# programming model.\n \nExtensionAutoOpens.notNull \nnotNull \nExtensionAutoOpens.array3D \narray3D \nExtensionAutoOpens.array4D \narray4D \nExtensionAutoOpens.array5D \narray5D \nExtensionAutoOpens.array6D \narray6D \nExtensionAutoOpens.print \nprint"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-extensionautoopens.html#notNull","title":"ExtensionAutoOpens.notNull","content":"ExtensionAutoOpens.notNull \nnotNull \n\n Indicates if a value is not null.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-extensionautoopens.html#array3D","title":"ExtensionAutoOpens.array3D","content":"ExtensionAutoOpens.array3D \narray3D \n\n Creates a non-jagged 3D array from jagged data.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-extensionautoopens.html#array4D","title":"ExtensionAutoOpens.array4D","content":"ExtensionAutoOpens.array4D \narray4D \n\n Creates a non-jagged 4D array from jagged data.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-extensionautoopens.html#array5D","title":"ExtensionAutoOpens.array5D","content":"ExtensionAutoOpens.array5D \narray5D 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-extensionautoopens.html#array6D","title":"ExtensionAutoOpens.array6D","content":"ExtensionAutoOpens.array6D \narray6D \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-extensionautoopens.html#print","title":"ExtensionAutoOpens.print","content":"ExtensionAutoOpens.print \nprint \n\n Print the given value to the console using the \u0027%A\u0027 printf format specifier\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-ordereddictionary.html","title":"OrderedDictionary","content":"OrderedDictionary \n\n Contains extensions related to .NET OrderedDictionary. \n \nOrderedDictionary.copyKeys \ncopyKeys"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-ordereddictionary.html#copyKeys","title":"OrderedDictionary.copyKeys","content":"OrderedDictionary.copyKeys \ncopyKeys \n\n Gets a fresh array containing the keys of the dictionary.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-randommodule.html","title":"Random","content":"Random \n\n Contains operations relating to pseudo-random number generation.\n \nRandom.shuffledIndices \nshuffledIndices"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-randommodule.html#shuffledIndices","title":"Random.shuffledIndices","content":"Random.shuffledIndices \nshuffledIndices \n\n Returns a function that maps a given index to a shuffled version of the indexes up to the given \u0060length\u0060\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-seq.html","title":"Seq","content":"Seq \n\n Contains extensions to the F# Seq module. 
\n \nSeq.maxIndex \nmaxIndex \nSeq.minIndex \nminIndex \nSeq.allEqual \nallEqual \nSeq.duplicates \nduplicates \nSeq.hasDuplicates \nhasDuplicates \nSeq.toArrayQuick \ntoArrayQuick"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-seq.html#maxIndex","title":"Seq.maxIndex","content":"Seq.maxIndex \nmaxIndex \n\n Gets the index of the maximum element of the sequence.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-seq.html#minIndex","title":"Seq.minIndex","content":"Seq.minIndex \nminIndex \n\n Gets the index of the minimum element of the sequence.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-seq.html#allEqual","title":"Seq.allEqual","content":"Seq.allEqual \nallEqual \n\n Indicates if all elements of the sequence are equal.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-seq.html#duplicates","title":"Seq.duplicates","content":"Seq.duplicates \nduplicates \n\n Gets the duplicate elements in the sequence.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-seq.html#hasDuplicates","title":"Seq.hasDuplicates","content":"Seq.hasDuplicates \nhasDuplicates \n\n Indicates if a sequence has duplicate elements.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-seq.html#toArrayQuick","title":"Seq.toArrayQuick","content":"Seq.toArrayQuick \ntoArrayQuick \n\n Like Seq.toArray but does not clone the array if the input is already an array\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html","title":"UtilAutoOpens","content":"UtilAutoOpens \n\n Contains auto-opened utilities related to the Furnace programming model.\n \nUtilAutoOpens.memoize \nmemoize \nUtilAutoOpens.saveBinary \nsaveBinary \nUtilAutoOpens.loadBinary \nloadBinary \nUtilAutoOpens.logSqrt2Pi \nlogSqrt2Pi \nUtilAutoOpens.log10Val \nlog10Val \nUtilAutoOpens.indentNewLines \nindentNewLines \nUtilAutoOpens.stringPad \nstringPad 
\nUtilAutoOpens.stringPadAs \nstringPadAs \nUtilAutoOpens.thousandsInt \nthousandsInt \nUtilAutoOpens.thousandsFloat \nthousandsFloat \nUtilAutoOpens.fileToBase64String \nfileToBase64String \nUtilAutoOpens.pngToHtml \npngToHtml \nUtilAutoOpens.bytesReadable \nbytesReadable \nUtilAutoOpens.(!) \n(!) \nUtilAutoOpens.(:=) \n(:=)"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#memoize","title":"UtilAutoOpens.memoize","content":"UtilAutoOpens.memoize \nmemoize \n\n Returns a function that memoizes the given function using a lookaside table.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#saveBinary","title":"UtilAutoOpens.saveBinary","content":"UtilAutoOpens.saveBinary \nsaveBinary \n\n Saves the given value to the given local file using binary serialization.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#loadBinary","title":"UtilAutoOpens.loadBinary","content":"UtilAutoOpens.loadBinary \nloadBinary \n\n Loads the given value from the given local file using binary serialization.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#logSqrt2Pi","title":"UtilAutoOpens.logSqrt2Pi","content":"UtilAutoOpens.logSqrt2Pi \nlogSqrt2Pi \n\n Value of log(sqrt(2*Math.PI)).\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#log10Val","title":"UtilAutoOpens.log10Val","content":"UtilAutoOpens.log10Val \nlog10Val \n\n Value of log(10).\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#indentNewLines","title":"UtilAutoOpens.indentNewLines","content":"UtilAutoOpens.indentNewLines \nindentNewLines \n\n Indents all lines of the given string by the given number of spaces.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#stringPad","title":"UtilAutoOpens.stringPad","content":"UtilAutoOpens.stringPad 
\nstringPad \n\n Left-pads a string up to the given length.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#stringPadAs","title":"UtilAutoOpens.stringPadAs","content":"UtilAutoOpens.stringPadAs \nstringPadAs \n\n Left-pads a string to match the length of another string.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#thousandsInt","title":"UtilAutoOpens.thousandsInt","content":"UtilAutoOpens.thousandsInt \nthousandsInt \n\n Formats an integer as a string with comma as thousands separator\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#thousandsFloat","title":"UtilAutoOpens.thousandsFloat","content":"UtilAutoOpens.thousandsFloat \nthousandsFloat \n\n Formats an integer as a string with comma as thousands separator\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#fileToBase64String","title":"UtilAutoOpens.fileToBase64String","content":"UtilAutoOpens.fileToBase64String \nfileToBase64String \n\n Returns the file contents as Base64 encoded string\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#pngToHtml","title":"UtilAutoOpens.pngToHtml","content":"UtilAutoOpens.pngToHtml \npngToHtml \n\n Given a PNG image file name, returns an HTML image element with the image content included as a Base64 encoded string\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#bytesReadable","title":"UtilAutoOpens.bytesReadable","content":"UtilAutoOpens.bytesReadable \nbytesReadable \n\n Return a human-readable string representation of the given value in Bytes.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#(!)","title":"UtilAutoOpens.(!)","content":"UtilAutoOpens.(!) \n(!) 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-utilautoopens.html#(:=)","title":"UtilAutoOpens.(:=)","content":"UtilAutoOpens.(:=) \n(:=) \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-globalnestinglevel.html","title":"GlobalNestingLevel","content":"GlobalNestingLevel \n\n Contains operations to get, set or reset the global nesting level for differentiation operations.\n \nGlobalNestingLevel.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nGlobalNestingLevel.Next \nNext \nGlobalNestingLevel.Reset \nReset \nGlobalNestingLevel.Set \nSet \nGlobalNestingLevel.Current \nCurrent"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-globalnestinglevel.html#\u0060\u0060.ctor\u0060\u0060","title":"GlobalNestingLevel.\u0060\u0060.ctor\u0060\u0060","content":"GlobalNestingLevel.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-globalnestinglevel.html#Next","title":"GlobalNestingLevel.Next","content":"GlobalNestingLevel.Next \nNext \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-globalnestinglevel.html#Reset","title":"GlobalNestingLevel.Reset","content":"GlobalNestingLevel.Reset \nReset \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-globalnestinglevel.html#Set","title":"GlobalNestingLevel.Set","content":"GlobalNestingLevel.Set \nSet \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-globalnestinglevel.html#Current","title":"GlobalNestingLevel.Current","content":"GlobalNestingLevel.Current \nCurrent \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-nestinglevel.html","title":"NestingLevel","content":"NestingLevel \n\n Represents a differentiation nesting level.\n \nNestingLevel.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nNestingLevel.Next \nNext \nNestingLevel.Current 
\nCurrent"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-nestinglevel.html#\u0060\u0060.ctor\u0060\u0060","title":"NestingLevel.\u0060\u0060.ctor\u0060\u0060","content":"NestingLevel.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-nestinglevel.html#Next","title":"NestingLevel.Next","content":"NestingLevel.Next \nNext \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-nestinglevel.html#Current","title":"NestingLevel.Current","content":"NestingLevel.Current \nCurrent \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html","title":"Random","content":"Random \n\n Contains operations relating to pseudo-random number generation.\n \nRandom.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nRandom.Bernoulli \nBernoulli \nRandom.Bernoulli \nBernoulli \nRandom.Choice \nChoice \nRandom.Choice \nChoice \nRandom.ChoiceIndex \nChoiceIndex \nRandom.Double \nDouble \nRandom.Double \nDouble \nRandom.Integer \nInteger \nRandom.Integer \nInteger \nRandom.Multinomial \nMultinomial \nRandom.Multinomial \nMultinomial \nRandom.Normal \nNormal \nRandom.Normal \nNormal \nRandom.Seed \nSeed \nRandom.Shuffle \nShuffle \nRandom.UUID \nUUID \nRandom.Uniform \nUniform \nRandom.Uniform \nUniform"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#\u0060\u0060.ctor\u0060\u0060","title":"Random.\u0060\u0060.ctor\u0060\u0060","content":"Random.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Bernoulli","title":"Random.Bernoulli","content":"Random.Bernoulli \nBernoulli \n\n Samples a random value from the Bernoulli distribution.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Bernoulli","title":"Random.Bernoulli","content":"Random.Bernoulli \nBernoulli \n\n Samples 
a random value from the Bernoulli distribution with the given probability.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Choice","title":"Random.Choice","content":"Random.Choice \nChoice \n\n Samples a value at random from the given array using the given categorical probabilities.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Choice","title":"Random.Choice","content":"Random.Choice \nChoice \n\n Samples a value at random from the given array.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#ChoiceIndex","title":"Random.ChoiceIndex","content":"Random.ChoiceIndex \nChoiceIndex \n\n Samples an index at random with the given categorical probabilities.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Double","title":"Random.Double","content":"Random.Double \nDouble \n\n Samples a double value in the given range [low, high)\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Double","title":"Random.Double","content":"Random.Double \nDouble \n\n Samples a double value in the range [0, 1)\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Integer","title":"Random.Integer","content":"Random.Integer \nInteger \n\n Samples a random integer in the given range [low, high).\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Integer","title":"Random.Integer","content":"Random.Integer \nInteger \n\n Samples a non-negative random integer\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Multinomial","title":"Random.Multinomial","content":"Random.Multinomial \nMultinomial \n\n Returns a 2D array where each row contains \u0060numSamples\u0060 indices sampled from the multinomial probability distribution defined by the probabilities in the corresponding row of the \u0060probs\u0060 
array.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Multinomial","title":"Random.Multinomial","content":"Random.Multinomial \nMultinomial \n\n Samples a number of random values array of random values for the given weighted distribution\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Normal","title":"Random.Normal","content":"Random.Normal \nNormal \n\n Samples a random value from the normal distribution with the given mean and standard deviation.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Normal","title":"Random.Normal","content":"Random.Normal \nNormal \n\n Samples a random value from the standard normal distribution with mean 0 and standard deviation 1.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Seed","title":"Random.Seed","content":"Random.Seed \nSeed \n\n Sets the random seed.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Shuffle","title":"Random.Shuffle","content":"Random.Shuffle \nShuffle \n\n Returns an array that is a randomly-shuffled version of the given array, using the Durstenfeld/Knuth shuffle.\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#UUID","title":"Random.UUID","content":"Random.UUID \nUUID \n\n Returns a universally unique identifier (UUID) string\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Uniform","title":"Random.Uniform","content":"Random.Uniform \nUniform \n\n Samples a random value from the uniform distribution with the given parameters [low, high).\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-random.html#Uniform","title":"Random.Uniform","content":"Random.Uniform \nUniform \n\n Samples a random value from the standard uniform distribution over the interval 
[0,1).\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-helpers.html","title":"helpers","content":"helpers \n \nhelpers.printVal \nprintVal \nhelpers.toPython \ntoPython \nhelpers.runScript \nrunScript"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-helpers.html#printVal","title":"helpers.printVal","content":"helpers.printVal \nprintVal \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-helpers.html#toPython","title":"helpers.toPython","content":"helpers.toPython \ntoPython \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-helpers.html#runScript","title":"helpers.runScript","content":"helpers.runScript \nrunScript \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html","title":"Pyplot","content":"Pyplot \n \nPyplot.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \nPyplot.addPython \naddPython \nPyplot.figure \nfigure \nPyplot.hist \nhist \nPyplot.legend \nlegend \nPyplot.plot \nplot \nPyplot.plot \nplot \nPyplot.savefig \nsavefig \nPyplot.tightLayout \ntightLayout \nPyplot.xlabel \nxlabel \nPyplot.xscale \nxscale \nPyplot.ylabel \nylabel \nPyplot.yscale \nyscale \nPyplot.script \nscript"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#\u0060\u0060.ctor\u0060\u0060","title":"Pyplot.\u0060\u0060.ctor\u0060\u0060","content":"Pyplot.\u0060\u0060.ctor\u0060\u0060 \n\u0060\u0060.ctor\u0060\u0060 \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#addPython","title":"Pyplot.addPython","content":"Pyplot.addPython \naddPython \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#figure","title":"Pyplot.figure","content":"Pyplot.figure \nfigure \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#hist","title":"Pyplot.hist","content":"Pyplot.hist \nhist 
\n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#legend","title":"Pyplot.legend","content":"Pyplot.legend \nlegend \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#plot","title":"Pyplot.plot","content":"Pyplot.plot \nplot \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#plot","title":"Pyplot.plot","content":"Pyplot.plot \nplot \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#savefig","title":"Pyplot.savefig","content":"Pyplot.savefig \nsavefig \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#tightLayout","title":"Pyplot.tightLayout","content":"Pyplot.tightLayout \ntightLayout \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#xlabel","title":"Pyplot.xlabel","content":"Pyplot.xlabel \nxlabel \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#xscale","title":"Pyplot.xscale","content":"Pyplot.xscale \nxscale \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#ylabel","title":"Pyplot.ylabel","content":"Pyplot.ylabel \nylabel \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#yscale","title":"Pyplot.yscale","content":"Pyplot.yscale \nyscale \n"},{"uri":"https://fsprojects.github.io/Furnace/reference/furnace-util-pyplot.html#script","title":"Pyplot.script","content":"Pyplot.script \nscript \n"},{"uri":"https://fsprojects.github.io/Furnace/differentiable-programming.html","title":"differentiable-programming","content":"// PyTorch style\n\n// Furnace style"},{"uri":"https://fsprojects.github.io/Furnace/extensions.html","title":"Extending Furnace\n","content":"(*** condition: prepare ***)\n#I \u0022../tests/Furnace.Tests/bin/Debug/net6.0\u0022\n#r \u0022Furnace.Core.dll\u0022\n#r \u0022Furnace.Data.dll\u0022\n#r \u0022Furnace.Backends.Reference.dll\u0022\n#r 
\u0022Furnace.Backends.Torch.dll\u0022\n// These are needed to make fsdocs --eval work. If we don\u0027t select a backend like this in the beginning, we get erratic behavior.\nFurnace.FurnaceImage.config(backend=Furnace.Backend.Reference)\nFurnace.FurnaceImage.seed(123)\n\n(*** condition: fsx ***)\n#if FSX\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n#endif // FSX\n(*** condition: ipynb ***)\n#if IPYNB\n// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n// !bash \u003C(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n#endif // IPYNB\n(*** condition: ipynb ***)\n#if IPYNB\n// Import Furnace package\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n\n// Set dotnet interactive formatter to plaintext\nFormatter.SetPreferredMimeTypesFor(typeof\u003Cobj\u003E, \u0022text/plain\u0022)\nFormatter.Register(fun (x:obj) (writer: TextWriter) -\u003E fprintfn writer \u0022%120A\u0022 x )\n#endif // IPYNB\n\n(**\n[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/{{fsdocs-source-basename}}.ipynb)\u0026emsp;\n[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath={{fsdocs-source-basename}}.ipynb)\u0026emsp;\n[![Script](img/badge-script.svg)]({{fsdocs-source-basename}}.fsx)\u0026emsp;\n[![Script](img/badge-notebook.svg)]({{fsdocs-source-basename}}.ipynb)\n\n# Extending Furnace\n\n\nFurnace provides most of the essential operations found in tensor libraries such as [NumPy](https://numpy.org/), [PyTorch](https://pytorch.org/), and [TensorFlow](https://www.tensorflow.org/). All differentiable operations support the forward, reverse, and nested differentiation modes. 
\n\nWhen implementing new operations, you should prefer to implement these as compositions of existing Furnace \u0060cref:T:Furnace.Tensor\u0060 operations, which would give you differentiability out of the box.\n\nIn the rare cases where you need to extend Furnace with a completely new differentiable operation that cannot be implemented as a composition of existing operations, you can use the provided extension API.\n\n## Simple elementwise functions\n\nIf the function you would like to implement is a simple elementwise function, you can use the \u0060cref:T:Furnace.UnaryOpElementwise\u0060 or \u0060cref:T:Furnace.BinaryOpElementwise\u0060 types to define your function and its derivatives. The forward, reverse, and nested differentiation rules for the function are automatically generated by the type. The documentation of these two types detail how they should be instantiated.\n\nLet\u0027s see several examples.\n\n$ f(a) = \\mathrm{sin}(a) $, with derivative $ \\frac{\\partial f(a)}{\\partial a} = \\mathrm{cos}(a) \\;$.\n*)\nopen Furnace\n\ntype Tensor with\n member a.sin() = \n Tensor.Op\n { new UnaryOpElementwise(\u0022sin\u0022) with \n member _.fRaw(a) = a.SinT()\n member _.dfda(a,f) = a.cos()\n }\n (a)\n\n(**\n$ f(a) = \\mathrm{log}(a) $, with derivative $ \\frac{\\partial f(a)}{\\partial a} = 1/a \\;$.\n*)\ntype Tensor with\n member a.log() =\n Tensor.Op\n { new UnaryOpElementwise(\u0022log\u0022) with\n member _.fRaw(a) = a.LogT()\n member _.dfda(a,f) = 1/a\n }\n (a)\n\n\n(**\n$ f(a, b) = ab $, with derivatives $ \\frac{\\partial f(a, b)}{\\partial a} = b $, $ \\frac{\\partial f(a, b)}{\\partial b} = a \\;$.\n*)\ntype Tensor with\n member a.mul(b) =\n Tensor.Op\n { new BinaryOpElementwise(\u0022mul\u0022) with\n member _.fRaw(a,b) = a.MulTT(b)\n member _.dfda(a,b,f) = b\n member _.dfdb(a,b,f) = a\n }\n (a,b)\n\n(**\n$ f(a, b) = a^b $, with derivatives $ \\frac{\\partial f(a, b)}{\\partial a} = b a^{b-1} $, $ \\frac{\\partial f(a, b)}{\\partial b} = a^b 
\\mathrm{log}(a) \\;$. Note the use of the argument \u0060f\u0060 in the derivative definitions that makes use of the pre-computed value of $ f(a, b) = a^b $ that is available to the derivative implementation.\n*)\ntype Tensor with\n member a.pow(b) =\n Tensor.Op\n { new BinaryOpElementwise(\u0022pow\u0022) with\n member _.fRaw(a,b) = a.PowTT(b)\n member _.dfda(a,b,f) = b * f / a // equivalent to b * a.pow(b-1)\n member _.dfdb(a,b,f) = f * a.log() // equivalent to a.pow(b) * a.log()\n }\n (a,b)\n\n\n(**\n## General functions\n\nFor more complicated functions, you can use the most general way of defining functions using the \u0060cref:T:Furnace.UnaryOp\u0060 or \u0060cref:T:Furnace.BinaryOp\u0060 types, which allow you to define the full forward and reverse mode differentiation rules. The documentation of these two types detail how they should be instantiated.\n\nLet\u0027s see several examples.\n\n$ f(A) = A^{\\intercal} $, with the forward derivative propagation rule $ \\frac{\\partial f(A)}{\\partial X} = \\frac{\\partial A}{\\partial X} \\frac{\\partial f(A)}{\\partial A} = (\\frac{\\partial A}{\\partial X})^{\\intercal} $ and the reverse derivative propagation rule $ \\frac{\\partial Y}{\\partial A} = \\frac{\\partial Y}{\\partial f(A)} \\frac{\\partial f(A)}{\\partial A} = (\\frac{\\partial Y}{\\partial f(A)})^{\\intercal} \\;$.\n*)\ntype Tensor with\n member a.transpose() =\n Tensor.Op\n { new UnaryOp(\u0022transpose\u0022) with\n member _.fRaw(a) = a.TransposeT2()\n member _.ad_dfda(a,ad,f) = ad.transpose()\n member _.fd_dfda(a,f,fd) = fd.transpose()\n }\n (a)\n\n\n(**\n$ f(A, B) = AB $, with the forward derivative propagation rule $ \\frac{\\partial(A, B)}{\\partial X} = \\frac{\\partial A}{\\partial X} \\frac{\\partial f(A, B)}{\\partial A} \u002B \\frac{\\partial B}{\\partial X} \\frac{\\partial f(A, B)}{\\partial B} = \\frac{\\partial A}{\\partial X} B \u002B A \\frac{\\partial B}{\\partial X}$ and the reverse propagation rule $ \\frac{\\partial 
Y}{\\partial A} = \\frac{\\partial Y}{\\partial f(A, B)} \\frac{\\partial f(A, B)}{\\partial A} = \\frac{\\partial Y}{\\partial f(A, B)} B^{\\intercal} $, $ \\frac{\\partial Y}{\\partial B} = \\frac{\\partial Y}{\\partial f(A, B)} \\frac{\\partial f(A, B)}{B} = A^{\\intercal} \\frac{\\partial Y}{\\partial f(A, B)} \\;$.\n*)\ntype Tensor with\n member a.matmul(b) =\n Tensor.Op\n { new BinaryOp(\u0022matmul\u0022) with\n member _.fRaw(a,b) = a.MatMulTT(b)\n member _.ad_dfda(a,ad,b,f) = ad.matmul(b)\n member _.bd_dfdb(a,b,bd,f) = a.matmul(bd)\n member _.fd_dfda(a,b,f,fd) = fd.matmul(b.transpose())\n member _.fd_dfdb(a,b,f,fd) = a.transpose().matmul(fd)\n }\n (a,b)"},{"uri":"https://fsprojects.github.io/Furnace/probability-distributions.html","title":"probability-distributions","content":""},{"uri":"https://fsprojects.github.io/Furnace/nested-derivatives.html","title":"nested-derivatives","content":""},{"uri":"https://fsprojects.github.io/Furnace/tutorial-vae.html","title":"tutorial-vae","content":""},{"uri":"https://fsprojects.github.io/Furnace/tutorial-gan.html","title":"tutorial-gan","content":""},{"uri":"https://fsprojects.github.io/Furnace/tensors.html","title":"tensors","content":"(*** condition: prepare ***)\n#I \u0022../tests/Furnace.Tests/bin/Debug/net6.0\u0022\n#r \u0022Furnace.Core.dll\u0022\n#r \u0022Furnace.Data.dll\u0022\n#r \u0022Furnace.Backends.Reference.dll\u0022\n#r \u0022Furnace.Backends.Torch.dll\u0022\n// These are needed to make fsdocs --eval work. 
If we don\u0027t select a backend like this in the beginning, we get erratic behavior.\nFurnace.FurnaceImage.config(backend=Furnace.Backend.Reference)\nFurnace.FurnaceImage.seed(123)\n\n(*** condition: fsx ***)\n#if FSX\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n#endif // FSX\n(*** condition: ipynb ***)\n#if IPYNB\n// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n// !bash \u003C(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n#endif // IPYNB\n(*** condition: ipynb ***)\n#if IPYNB\n// Import Furnace package\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n\n// Set dotnet interactive formatter to plaintext\nFormatter.SetPreferredMimeTypesFor(typeof\u003Cobj\u003E, \u0022text/plain\u0022)\nFormatter.Register(fun (x:obj) (writer: TextWriter) -\u003E fprintfn writer \u0022%120A\u0022 x )\n#endif // IPYNB\n\n(**\n[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/{{fsdocs-source-basename}}.ipynb)\u0026emsp;\n[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath={{fsdocs-source-basename}}.ipynb)\u0026emsp;\n[![Script](img/badge-script.svg)]({{fsdocs-source-basename}}.fsx)\u0026emsp;\n[![Script](img/badge-notebook.svg)]({{fsdocs-source-basename}}.ipynb)\n\n* The \u0060cref:T:Furnace.FurnaceImage\u0060 API\n\n* The \u0060cref:T:Furnace.Tensor\u0060 type\n\nSaving tensors as image and loading images as tensors\n\n\n## Converting between Tensors and arrays\n\nSystem.Array and F# arrays\n\n*)\n\nopen Furnace\n\n// Tensor\nlet t1 = FurnaceImage.tensor [ 0.0 .. 0.2 .. 
1.0 ]\n\n// System.Array\nlet a1 = t1.toArray()\n\n// []\u003Cfloat32\u003E\nlet a1b = t1.toArray() :?\u003E float32[]\n\n// Tensor\nlet t2 = FurnaceImage.randn([3;3;3])\n\n// [,,]\u003Cfloat32\u003E\nlet a2 = t2.toArray() :?\u003E float32[,,]"},{"uri":"https://fsprojects.github.io/Furnace/install.html","title":"Installing\n","content":"(*** condition: prepare ***)\n#I \u0022../tests/Furnace.Tests/bin/Debug/net6.0\u0022\n#r \u0022Furnace.Core.dll\u0022\n#r \u0022Furnace.Data.dll\u0022\n#r \u0022Furnace.Backends.Reference.dll\u0022\n#r \u0022Furnace.Backends.Torch.dll\u0022\n// These are needed to make fsdocs --eval work. If we don\u0027t select a backend like this in the beginning, we get erratic behavior.\nFurnace.FurnaceImage.config(backend=Furnace.Backend.Reference)\nFurnace.FurnaceImage.seed(123)\n\n(*** condition: fsx ***)\n#if FSX\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n#endif // FSX\n(*** condition: ipynb ***)\n#if IPYNB\n// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n// !bash \u003C(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n#endif // IPYNB\n(*** condition: ipynb ***)\n#if IPYNB\n// Import Furnace package\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n\n// Set dotnet interactive formatter to plaintext\nFormatter.SetPreferredMimeTypesFor(typeof\u003Cobj\u003E, \u0022text/plain\u0022)\nFormatter.Register(fun (x:obj) (writer: TextWriter) -\u003E fprintfn writer \u0022%120A\u0022 x )\n#endif // 
IPYNB\n\n(**\n[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/{{fsdocs-source-basename}}.ipynb)\u0026emsp;\n[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath={{fsdocs-source-basename}}.ipynb)\u0026emsp;\n[![Script](img/badge-script.svg)]({{fsdocs-source-basename}}.fsx)\u0026emsp;\n[![Script](img/badge-notebook.svg)]({{fsdocs-source-basename}}.ipynb)\n\n# Installing\n\nFurnace runs on [dotnet](https://dotnet.microsoft.com/), a cross-platform, open-source platform supported on Linux, macOS, and Windows.\n\nThere are various ways in which you can run Furnace, the main ones being: [interactive notebooks](https://github.com/dotnet/interactive) supporting [Visual Studio Code](https://code.visualstudio.com/) and [Jupyter](https://jupyter.org/); running in a [REPL](https://github.com/jonsequitur/dotnet-repl); running [script files](https://docs.microsoft.com/en-us/dotnet/fsharp/tools/fsharp-interactive/); and [compiling, packing, and publishing](https://docs.microsoft.com/en-us/dotnet/core/introduction) performant binaries.\n\n\n## Interactive Notebooks and Scripts\n\nYou can use Furnace in [dotnet interactive](https://github.com/dotnet/interactive) notebooks in [Visual Studio Code](https://code.visualstudio.com/) or [Jupyter](https://jupyter.org/), or in F# scripts (\u0060.fsx\u0060 files), by referencing the package as follows:\n\n // Use one of the following three lines\n #r \u0022nuget: Furnace-cpu\u0022 // Use the latest version\n #r \u0022nuget: Furnace-cpu, *-*\u0022 // Use the latest pre-release version\n #r \u0022nuget: Furnace-cpu, 1.0.1\u0022 // Use a specific version\n\n open Furnace\n\n\u003C/br\u003E\n\u003Cimg src=\u0022img/anim-intro-1.gif\u0022 width=\u002285%\u0022 /\u003E\n\n## Dotnet Applications\n\nYou can add Furnace to your dotnet application using the [dotnet](https://dotnet.microsoft.com/) command-line 
interface (CLI).\n\nFor example, the following creates a new F# console application and adds the latest pre-release version of the \u0060Furnace-cpu\u0060 package as a dependency.\n\n dotnet new console -lang \u0022F#\u0022 -o src/app\n cd src/app\n dotnet add package --prerelease Furnace-cpu\n dotnet run\n\n## Packages\n\nWe provide several package bundles for a variety of use cases.\n\n* [Furnace-cpu](https://www.nuget.org/packages/Furnace-cpu)\u003C/br\u003E\n Includes LibTorch CPU binaries for Linux, macOS, and Windows.\n* [Furnace-cuda-linux](https://www.nuget.org/packages/Furnace-cuda-linux) / [Furnace-cuda-windows](https://www.nuget.org/packages/Furnace-cuda-windows)\u003C/br\u003E\n Include LibTorch CPU and CUDA GPU binaries for Linux and Windows. Large download.\n* [Furnace-lite](https://www.nuget.org/packages/Furnace-lite)\u003C/br\u003E\n Includes the Torch backend but not the LibTorch binaries. \n\n### Using local LibTorch binaries (optional)\n\nYou can combine the \u0060Furnace-lite\u0060 package bundle with existing local native binaries of LibTorch for your OS (Linux, Mac, or Windows) installed through other means. \n\nLibTorch is the main tensor computation core implemented in C\u002B\u002B/CUDA and it is used by PyTorch in Python and by other projects in various programming languages. The following are two common ways of having LibTorch in your system.\n\n* If you use Python and have [PyTorch](https://pytorch.org/) installed, this comes with LibTorch as a part of the PyTorch distribution. 
If your GPU works in this PyTorch installation without any issues, it will also work in Furnace.\n* You can download the native LibTorch package without Python by following the [get started](https://pytorch.org/get-started/locally/) instructions in the PyTorch website, and extracting the downloaded archive to a folder in your system.\n\nBefore using the \u0060Torch\u0060 backend in Furnace, you will have to add an explicit load of the LibTorch native library, which you can do as follows. In order to find the location of LibTorch binaries, searching for \u0060libtorch.so\u0060 in your system might be helpful. Note that this file is called \u0060libtorch.so\u0060 in Linux, \u0060libtorch.dylib\u0060 in macOS, and \u0060torch.dll\u0060 in Windows.\n\n open System.Runtime.InteropServices\n NativeLibrary.Load(\u0022/home/user/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so\u0022)\n\n\n## Backends and Devices\n\nFurnace currently provides two computation backends.\n\n* The \u0060Torch\u0060 backend is the default and recommended backend based on [LibTorch](https://pytorch.org/cppdocs/), using the same C\u002B\u002B and CUDA implementations for tensor computations that power [PyTorch](https://pytorch.org/). On top of these raw tensors (LibTorch\u0027s ATen, excluding autograd), Furnace implements its own computation graph and differentiation capabilities. This backend requires platform-specific binaries of LibTorch, which we provide and test on Linux, macOS, and Windows.\n\n* The \u0060Reference\u0060 backend is implemented purely in F# and can run on any hardware platform where [dotnet](https://dotnet.microsoft.com/) can run (for example iOS, Android, Raspberry Pi). This backend has reasonable performance for use cases dominated by scalar and small tensor operations, and is not recommended for use cases involving large tensor operations (such as machine learning). 
This backend is always available.\n\n### Configuration of Default Backend, Device, and Tensor Type\n\nSelection of the default backend, device, and tensor type is done using \u0060cref:M:Furnace.FurnaceImage.config\u0060.\n\n* \u0060cref:T:Furnace.Dtype\u0060 choices available: \u0060BFloat16\u0060, \u0060Bool\u0060, \u0060Byte\u0060, \u0060Float16\u0060, \u0060Float32\u0060, \u0060Float64\u0060, \u0060Int16\u0060, \u0060Int32\u0060, \u0060Int64\u0060, \u0060Int8\u0060\n\n* \u0060cref:T:Furnace.Device\u0060 choices available: \u0060CPU\u0060, \u0060GPU\u0060\n\n* \u0060cref:T:Furnace.Backend\u0060 choices available: \u0060Reference\u0060, \u0060Torch\u0060\n\nFor example, the following selects the \u0060Torch\u0060 backend with single precision tensors as the default tensor type and GPU (CUDA) execution.\n\n*)\n\nopen Furnace\n\nFurnaceImage.config(dtype=Dtype.Float32, device=Device.GPU, backend=Backend.Torch)\n\n(**\nThe following selects the \u0060Reference\u0060 backend.\n*)\n\nFurnaceImage.config(backend=Backend.Reference)\n\n(**\nA tensor\u0027s backend and device can be inspected as follows.\n\n*)\nlet t = FurnaceImage.tensor [ 0 .. 10 ]\n\nlet device = t.device\nlet backend = t.backend\n\n(**\nTensors can be moved between devices (for example from CPU to GPU) using \u0060cref:M:Furnace.Tensor.move(Furnace.Device)\u0060. 
For example:\n*)\nlet t2 = t.move(Device.GPU)\n\n(**\n## Developing Furnace Libraries\n\nTo develop libraries built on Furnace, you can use the following guideline to reference the various packages.\n\n* Reference \u0060Furnace.Core\u0060 and \u0060Furnace.Data\u0060 in your library code.\n* Reference \u0060Furnace.Backends.Reference\u0060 in your correctness testing code.\n* Reference \u0060Furnace.Backends.Torch\u0060 and \u0060libtorch-cpu\u0060 in your CPU testing code.\n* Reference \u0060Furnace.Backends.Torch\u0060 and \u0060libtorch-cuda-linux\u0060 or \u0060libtorch-cuda-windows\u0060 in your (optional) GPU testing code.\n\n*)"},{"uri":"https://fsprojects.github.io/Furnace/tutorial-classifier.html","title":"tutorial-classifier","content":""},{"uri":"https://fsprojects.github.io/Furnace/models.html","title":"models","content":"(*** condition: prepare ***)\n#I \u0022../tests/Furnace.Tests/bin/Debug/net6.0\u0022\n#r \u0022Furnace.Core.dll\u0022\n#r \u0022Furnace.Data.dll\u0022\n#r \u0022Furnace.Backends.Reference.dll\u0022\n#r \u0022Furnace.Backends.Torch.dll\u0022\n// These are needed to make fsdocs --eval work. 
If we don\u0027t select a backend like this in the beginning, we get erratic behavior.\nFurnace.FurnaceImage.config(backend=Furnace.Backend.Reference)\nFurnace.FurnaceImage.seed(123)\n\n(**\nTest \n*)\n\nopen Furnace\n\nFurnaceImage.config(backend=Backend.Reference)\n\nlet a = FurnaceImage.tensor([1,2,3])\nprintfn \u0022%A\u0022 a\n(*** include-fsi-output ***)"},{"uri":"https://fsprojects.github.io/Furnace/tutorial-language.html","title":"tutorial-language","content":""},{"uri":"https://fsprojects.github.io/Furnace/index.html","title":"Furnace: Differentiable Tensor Programming Made Simple\n","content":"(*** condition: prepare ***)\n#I \u0022../tests/Furnace.Tests/bin/Debug/net6.0\u0022\n#r \u0022Furnace.Core.dll\u0022\n#r \u0022Furnace.Data.dll\u0022\n#r \u0022Furnace.Backends.Reference.dll\u0022\n#r \u0022Furnace.Backends.Torch.dll\u0022\n// These are needed to make fsdocs --eval work. If we don\u0027t select a backend like this in the beginning, we get erratic behavior.\nFurnace.FurnaceImage.config(backend=Furnace.Backend.Reference)\nFurnace.FurnaceImage.seed(123)\n\n(*** condition: fsx ***)\n#if FSX\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n#endif // FSX\n(*** condition: ipynb ***)\n#if IPYNB\n// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n// !bash \u003C(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n#endif // IPYNB\n(*** condition: ipynb ***)\n#if IPYNB\n// Import Furnace package\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n\n// Set dotnet interactive formatter to plaintext\nFormatter.SetPreferredMimeTypesFor(typeof\u003Cobj\u003E, \u0022text/plain\u0022)\nFormatter.Register(fun (x:obj) (writer: TextWriter) -\u003E fprintfn writer \u0022%120A\u0022 x )\n#endif // 
IPYNB\n\n(**\n[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/gh-pages/{{fsdocs-source-basename}}.ipynb)\u0026emsp;\n[![Script](img/badge-script.svg)]({{fsdocs-source-basename}}.fsx)\u0026emsp;\n[![Script](img/badge-notebook.svg)]({{fsdocs-source-basename}}.ipynb)\n\n# Furnace: Differentiable Tensor Programming Made Simple\n\nFurnace is a tensor library with support for [differentiable programming](https://en.wikipedia.org/wiki/Differentiable_programming).\nIt is designed for use in machine learning, probabilistic programming, optimization and other domains.\n\n\u003Cbutton class=\u0022button\u0022 style=\u0022vertical-align:middle\u0022 onclick=\u0022window.location.href=\u0027{{root}}install.html\u0027\u0022\u003E\u003Cspan\u003EInstall \u00BB\u003C/span\u003E\u003C/button\u003E\n\n## Key Features\n\n\uD83D\uDDF9 Nested and mixed-mode differentiation\n\n\uD83D\uDDF9 Common optimizers, model elements, differentiable probability distributions\n\n\uD83D\uDDF9 F# for robust functional programming \n\n\uD83D\uDDF9 PyTorch familiar naming and idioms, efficient LibTorch CUDA/C\u002B\u002B tensors with GPU support\n\n\uD83D\uDDF9 Linux, macOS, Windows supported\n\n\uD83D\uDDF9 Use interactive notebooks in Jupyter and Visual Studio Code\n\n\uD83D\uDDF9 100% open source \n\n\n## Differentiable Programming\n\nFurnace provides world-leading automatic differentiation capabilities for tensor code, including composable gradients, Hessians, Jacobians, directional derivatives, and matrix-free Hessian- and Jacobian-vector products over arbitrary user code. This goes beyond conventional tensor libraries such as PyTorch and TensorFlow, allowing the use of nested forward and reverse differentiation up to any level. \n\nWith Furnace, you can compute higher-order derivatives efficiently and differentiate functions that are internally making use of differentiation and gradient-based optimization. 
\n\n\u003C/br\u003E\n\u003Cimg src=\u0022img/anim-intro-2.gif\u0022 width=\u002285%\u0022 /\u003E\n\n## Practical, Familiar and Efficient\n\nFurnace comes with a [LibTorch](https://pytorch.org/cppdocs/) backend, using the same C\u002B\u002B and CUDA implementations for tensor computations that power [PyTorch](https://pytorch.org/). On top of these raw tensors (LibTorch\u0027s ATen, excluding autograd), Furnace implements its own computation graph and differentiation capabilities. It is tested on Linux, macOS, and Windows, and it supports CUDA and GPUs.\n\nThe Furnace API is designed to be similar to [the PyTorch Python API](https://pytorch.org/docs/stable/index.html) through very similar naming and idioms, and where elements have similar names the PyTorch documentation can generally be used as a guide.\n\nFurnace uses [the incredible F# programming language](https://dot.net/fsharp) for tensor programming. F# code is generally faster and more robust than equivalent Python code, while still being succinct and compact like Python, making it an ideal modern AI and machine learning implementation language. 
This allows fluent and productive code for tensor programming.\n\n\u003C/br\u003E\n\u003Ciframe width=\u002285%\u0022 src=\u0022https://www.youtube.com/embed/_QnbV6CAWXc\u0022 title=\u0022YouTube video player\u0022 frameborder=\u00220\u0022 allow=\u0022accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\u0022 allowfullscreen\u003E\u003C/iframe\u003E\n\n## Interactive Notebooks\n\nAll documentation pages in this website are interactive notebooks which you can execute directly in your browser without installing anything in your local machine.\n\nUsing the [![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/gh-pages/{{fsdocs-source-basename}}.ipynb) on the top of each page, you can execute the page as an interactive notebook running on cloud servers provided by [Google Colab](https://colab.research.google.com/). \n\nUsing the buttons [![Script](img/badge-script.svg)]({{fsdocs-source-basename}}.fsx) \n[![Script](img/badge-notebook.svg)]({{fsdocs-source-basename}}.ipynb) you can also download a page as a script or an interactive notebook, which you can execute locally in [Jupyter](https://jupyter.org/) or [Visual Studio Code](https://code.visualstudio.com/) using [dotnet interactive](https://github.com/dotnet/interactive).\n\n## Example\n\nDefine and add two tensors:\n*)\n\nopen Furnace\n\nlet t1 = FurnaceImage.tensor [ 0.0 ..0.2.. 1.0 ] // Gives [0., 0.2, 0.4, 0.6, 0.8, 1.]\nlet t2 = FurnaceImage.tensor [ 1, 2, 3, 4, 5, 6 ]\n\nt1 \u002B t2\n(*** include-it ***)\n\n(** \nCompute a convolution:\n*)\nlet t3 = FurnaceImage.tensor [[[[0.0 .. 10.0]]]]\nlet t4 = FurnaceImage.tensor [[[[0.0 ..0.1.. 
1.0]]]]\n\nt3.conv2d(t4)\n(*** include-it ***)\n\n(** \nTake the gradient of a vector-to-scalar function:\n*)\n\nlet f (x: Tensor) = x.exp().sum()\n\nFurnaceImage.grad f (FurnaceImage.tensor([1.8, 2.5]))\n(*** include-it ***)\n\n(**\nCompute a nested derivative (checking for [perturbation confusion](https://doi.org/10.1007/s10990-008-9037-1)):\n*)\n\nlet x0 = FurnaceImage.tensor(1.)\nlet y0 = FurnaceImage.tensor(2.)\nFurnaceImage.diff (fun x -\u003E x * FurnaceImage.diff (fun y -\u003E x * y) y0) x0\n(*** include-it ***)\n\n\n(**\nDefine a model and optimize it:\n*)\n(*** do-not-eval-file ***)\nopen Furnace\nopen Furnace.Data\nopen Furnace.Model\nopen Furnace.Compose\nopen Furnace.Util\nopen Furnace.Optim\n\nlet epochs = 2\nlet batchSize = 32\nlet numBatches = 5\n\nlet trainSet = MNIST(\u0022../data\u0022, train=true, transform=id)\nlet trainLoader = trainSet.loader(batchSize=batchSize, shuffle=true)\n\nlet validSet = MNIST(\u0022../data\u0022, train=false, transform=id)\nlet validLoader = validSet.loader(batchSize=batchSize, shuffle=false)\n\nlet encoder =\n Conv2d(1, 32, 4, 2)\n --\u003E FurnaceImage.relu\n --\u003E Conv2d(32, 64, 4, 2)\n --\u003E FurnaceImage.relu\n --\u003E Conv2d(64, 128, 4, 2)\n --\u003E FurnaceImage.flatten(1)\n\nlet decoder =\n FurnaceImage.unflatten(1, [128;1;1])\n --\u003E ConvTranspose2d(128, 64, 4, 2)\n --\u003E FurnaceImage.relu\n --\u003E ConvTranspose2d(64, 32, 4, 3)\n --\u003E FurnaceImage.relu\n --\u003E ConvTranspose2d(32, 1, 4, 2)\n --\u003E FurnaceImage.sigmoid\n\nlet model = VAE([1;28;28], 64, encoder, decoder)\n\nlet lr = FurnaceImage.tensor(0.001)\nlet optimizer = Adam(model, lr=lr)\n\nfor epoch = 1 to epochs do\n let batches = trainLoader.epoch(numBatches)\n for i, x, _ in batches do\n model.reverseDiff()\n let l = model.loss(x)\n l.reverse()\n optimizer.step()\n print $\u0022Epoch: {epoch} minibatch: {i} loss: {l}\u0022 \n\nlet validLoss = \n validLoader.epoch() \n |\u003E Seq.sumBy (fun (_, x, _) -\u003E model.loss(x, 
normalize=false))\nprint $\u0022Validation loss: {validLoss/validSet.length}\u0022\n\n(**\n\nNumerous other model definition, differentiation, and training patterns are supported. See the tutorials in the left-hand menu and [examples](https://github.com/fsprojects/Furnace/tree/dev/examples) on GitHub.\n\n## More Information\n\nFurnace is developed by [At\u0131l\u0131m G\u00FCne\u015F Baydin](http://www.robots.ox.ac.uk/~gunes/), [Don Syme](https://www.microsoft.com/en-us/research/people/dsyme/)\nand other contributors, having started as a project supervised by the automatic differentiation wizards [Barak Pearlmutter](https://scholar.google.com/citations?user=AxFrw0sAAAAJ\u0026hl=en) and [Jeffrey Siskind](https://scholar.google.com/citations?user=CgSBtPYAAAAJ\u0026hl=en). \n\nPlease join us [on GitHub](https://github.com/fsprojects/Furnace)!\n\n*)"},{"uri":"https://fsprojects.github.io/Furnace/optimization.html","title":"optimization","content":""},{"uri":"https://fsprojects.github.io/Furnace/quickstart.html","title":"Quickstart\n","content":"(*** condition: prepare ***)\n#I \u0022../tests/Furnace.Tests/bin/Debug/net6.0\u0022\n#r \u0022Furnace.Core.dll\u0022\n#r \u0022Furnace.Data.dll\u0022\n#r \u0022Furnace.Backends.Reference.dll\u0022\n#r \u0022Furnace.Backends.Torch.dll\u0022\n#r \u0022nuget: SixLabors.ImageSharp,1.0.1\u0022 \n// These are needed to make fsdocs --eval work. 
If we don\u0027t select a backend like this in the beginning, we get erratic behavior.\nFurnace.FurnaceImage.config(backend=Furnace.Backend.Reference)\nFurnace.FurnaceImage.seed(123)\nopen Furnace.Util\n\n(*** condition: fsx ***)\n#if FSX\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n#r \u0022nuget: SixLabors.ImageSharp,1.0.1\u0022\n#endif // FSX\n(*** condition: ipynb ***)\n#if IPYNB\n// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n// !bash \u003C(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n#endif // IPYNB\n(*** condition: ipynb ***)\n#if IPYNB\n// Import Furnace package\n#r \u0022nuget: Furnace-lite,{{fsdocs-package-version}}\u0022\n#r \u0022nuget: SixLabors.ImageSharp,1.0.1\u0022\n\n// Set dotnet interactive formatter to plaintext\nFormatter.SetPreferredMimeTypesFor(typeof\u003Cobj\u003E, \u0022text/plain\u0022)\nFormatter.Register(fun (x:obj) (writer: TextWriter) -\u003E fprintfn writer \u0022%120A\u0022 x )\n#endif // IPYNB\n\n(**\n[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/{{fsdocs-source-basename}}.ipynb)\u0026emsp;\n[![Script](img/badge-script.svg)]({{fsdocs-source-basename}}.fsx)\u0026emsp;\n[![Script](img/badge-notebook.svg)]({{fsdocs-source-basename}}.ipynb)\n\n# Quickstart\n\nHere we cover some key tasks involved in a typical machine learning pipeline and how these can be implemented with Furnace. 
Note that a significant part of Furnace\u0027s design has been influenced by [PyTorch](https://pytorch.org/) and you would feel mostly at home if you have familiarity with PyTorch.\n\n## Datasets and Data Loaders\n\nFurnace provides the \u0060cref:T:Furnace.Data.Dataset\u0060 type that represents a data source and the \u0060cref:T:Furnace.Data.DataLoader\u0060 type that handles the loading of data from datasets and iterating over [minibatches](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Iterative_method) of data.\n\nSee the [Furnace.Data](/Furnace/reference/furnace-data.html) namespace for the full API reference.\n\n### Datasets\n\nFurnace has ready-to-use types that cover main datasets typically used in machine learning, such as \u0060cref:T:Furnace.Data.MNIST\u0060, \u0060cref:T:Furnace.Data.CIFAR10\u0060, \u0060cref:T:Furnace.Data.CIFAR100\u0060, and also more generic dataset types such as \u0060cref:T:Furnace.Data.TensorDataset\u0060 or \u0060cref:T:Furnace.Data.ImageDataset\u0060.\n\nThe following loads the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset and shows one image entry and the corresponding label.\n*)\n\nopen Furnace\nopen Furnace.Data\n\n// First ten images in MNIST training set\nlet dataset = MNIST(\u0022../data\u0022, train=true, transform=id, n=10)\n\n// Inspect a single image and label\nlet data, label = dataset[7]\n\n// Save image to file\ndata.saveImage(\u0022test.png\u0022)\n\n(** *)\n\n(*** hide ***)\npngToHtml \u0022test.png\u0022 64\n(*** include-it-raw ***)\n\n// Inspect data as ASCII and show label\nprintfn \u0022Data: %A\\nLabel: %A\u0022 (data.toImageString()) label\n(*** include-output ***)\n\n(**\n \n### Data Loaders\n\nA data loader handles tasks such as constructing minibatches from an underlying dataset on-the-fly, shuffling the data, and moving the data tensors between devices. 
In the example below we show a single batch of six MNIST images and their corresponding classification labels.\n\n*)\n\nlet loader = DataLoader(dataset, shuffle=true, batchSize=6)\nlet batch, labels = loader.batch()\n\nprintfn \u0022%A\\nLabels: %A\u0022 (batch.toImageString()) labels\n(*** include-output ***)\n\n(**\n\nIn practice a data loader is typically used to iterate over all minibatches in a given dataset in order to feed each minibatch through a machine learning model. One full iteration over the dataset would be called an \u0022epoch\u0022. Typically you would perform multiple such epochs of iterations during the training of a model.\n\n*)\n\nfor epoch = 1 to 10 do\n for i, data, labels in loader.epoch() do\n printfn \u0022Epoch %A, minibatch %A\u0022 epoch (i\u002B1)\n // Process the minibatch\n // ...\n(**\n\n## Models\n\nMany machine learning models are differentiable functions whose parameters can be tuned via [gradient-based optimization](https://en.wikipedia.org/wiki/Gradient_descent), finding an optimum for an objective function that quantifies the fit of the model to a given set of data. These models are typically built as compositions non-linear functions and ready-to-use building blocks such as linear, recurrent, and convolutional layers.\n\nFurnace provides the most commonly used model building blocks including convolutions, transposed convolutions, batch normalization, dropout, recurrent and other architectures.\n\nSee the [Furnace.Model](/Furnace/reference/furnace-model.html) namespace for the full API reference.\n\n### Constructing models, PyTorch style\n\nIf you have experience with [PyTorch](https://pytorch.org/), you would find the following way of model definition familiar. 
Let\u0027s look at an example of a [generative adversarial network (GAN)](https://arxiv.org/abs/1406.2661) architecture.\n*)\nopen Furnace.Model\nopen Furnace.Compose\n\n// PyTorch style\n\n// Define a model class inheriting the base\ntype Generator(nz: int) =\n inherit Model()\n let fc1 = Linear(nz, 256)\n let fc2 = Linear(256, 512)\n let fc3 = Linear(512, 1024)\n let fc4 = Linear(1024, 28*28)\n do base.addModel(fc1, fc2, fc3, fc4)\n override self.forward(x) =\n x\n |\u003E FurnaceImage.view([-1;nz])\n |\u003E fc1.forward\n |\u003E FurnaceImage.leakyRelu(0.2)\n |\u003E fc2.forward\n |\u003E FurnaceImage.leakyRelu(0.2)\n |\u003E fc3.forward\n |\u003E FurnaceImage.leakyRelu(0.2)\n |\u003E fc4.forward\n |\u003E FurnaceImage.tanh\n\n// Define a model class inheriting the base\ntype Discriminator(nz:int) =\n inherit Model()\n let fc1 = Linear(28*28, 1024)\n let fc2 = Linear(1024, 512)\n let fc3 = Linear(512, 256)\n let fc4 = Linear(256, 1)\n do base.addModel(fc1, fc2, fc3, fc4)\n override self.forward(x) =\n x\n |\u003E FurnaceImage.view([-1;28*28])\n |\u003E fc1.forward\n |\u003E FurnaceImage.leakyRelu(0.2)\n |\u003E FurnaceImage.dropout(0.3)\n |\u003E fc2.forward\n |\u003E FurnaceImage.leakyRelu(0.2)\n |\u003E FurnaceImage.dropout(0.3)\n |\u003E fc3.forward\n |\u003E FurnaceImage.leakyRelu(0.2)\n |\u003E FurnaceImage.dropout(0.3)\n |\u003E fc4.forward\n |\u003E FurnaceImage.sigmoid\n\n// Instantiate the defined classes\nlet nz = 128\nlet gen = Generator(nz)\nlet dis = Discriminator(nz)\n\nprint gen\nprint dis\n(*** include-output ***)\n\n(**\n### Constructing models, Furnace style\n\nA key advantage of Furnace lies in the [functional programming](https://en.wikipedia.org/wiki/Functional_programming) paradigm enabled by the F# language, where functions are first-class citizens, many algorithms can be constructed by applying and composing functions, and differentiation operations can be expressed as composable [higher-order 
functions](https://en.wikipedia.org/wiki/Higher-order_function). This allows very succinct (and beautiful) machine learning code to be expressed as a powerful combination of [lambda calculus](https://en.wikipedia.org/wiki/Lambda_calculus) and [differential calculus](https://en.wikipedia.org/wiki/Differential_calculus).\n\nFor example, the following constructs the same GAN architecture (that we constructed in PyTorch style in the previous section) using Furnace\u0027s \u0060--\u003E\u0060 composition operator, which allows you to seamlessly compose \u0060Model\u0060 instances and differentiable \u0060Tensor-\u003ETensor\u0060 functions. \n*)\n\n// Furnace style\n\n// Model as a composition of models and Tensor-\u003ETensor functions\nlet generator =\n FurnaceImage.view([-1;nz])\n --\u003E Linear(nz, 256)\n --\u003E FurnaceImage.leakyRelu(0.2)\n --\u003E Linear(256, 512)\n --\u003E FurnaceImage.leakyRelu(0.2)\n --\u003E Linear(512, 1024)\n --\u003E FurnaceImage.leakyRelu(0.2)\n --\u003E Linear(1024, 28*28)\n --\u003E FurnaceImage.tanh\n\n// Model as a composition of models and Tensor-\u003ETensor functions\nlet discriminator =\n FurnaceImage.view([-1; 28*28])\n --\u003E Linear(28*28, 1024)\n --\u003E FurnaceImage.leakyRelu(0.2)\n --\u003E FurnaceImage.dropout(0.3)\n --\u003E Linear(1024, 512)\n --\u003E FurnaceImage.leakyRelu(0.2)\n --\u003E FurnaceImage.dropout(0.3)\n --\u003E Linear(512, 256)\n --\u003E FurnaceImage.leakyRelu(0.2)\n --\u003E FurnaceImage.dropout(0.3)\n --\u003E Linear(256, 1)\n --\u003E FurnaceImage.sigmoid\n\nprint generator\nprint discriminator\n(*** include-output ***)"},{"uri":"https://fsprojects.github.io/Furnace/README.html","title":"Running notebooks in MyBinder\n","content":"\n\n# Running notebooks in MyBinder\n\nThe \u0060Dockerfile\u0060 and \u0060NuGet.config\u0060 allow us to run generated notebooks in [MyBinder](https://mybinder.org)\n\n* \u0060master\u0060 branch of fsprojects/furnace: 
[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/fsprojects/furnace/master)\n\n# Generating docs\n\n\nTo iterate on docs (requires evaluation off since DLLs get locked)\n\n dotnet fsdocs watch \n\nTo use a local build of FSharp.Formatting:\n\n git clone https://github.com/fsprojects/FSharp.Formatting ../FSharp.Formatting\n pushd ..\\FSharp.Formatting\n .\\build\n popd\n pop\n\nThen:\n\n ..\\FSharp.Formatting\\src\\FSharp.Formatting.CommandTool\\bin\\Debug\\net6.0\\fsdocs.exe watch\n ..\\FSharp.Formatting\\src\\FSharp.Formatting.CommandTool\\bin\\Debug\\net6.0\\fsdocs.exe build --clean --eval\n\n## Generated Notebooks\n\nNotebooks are generated for all .md and .fsx files under docs as part of the build.\n\n* Dockerfile - see https://github.com/dotnet/interactive/blob/master/docs/CreateBinder.md\n\n* NuGet.config - likewise\n\nSee MyBinder for creating URLs\n"}] \ No newline at end of file diff --git a/install.fsx b/install.fsx new file mode 100644 index 00000000..f6006f0e --- /dev/null +++ b/install.fsx @@ -0,0 +1,130 @@ +#r "nuget: Furnace-lite,1.0.8" +(** +[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/install.ipynb)  +[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath=install.ipynb)  +[![Script](img/badge-script.svg)](install.fsx)  +[![Script](img/badge-notebook.svg)](install.ipynb) + +# Installing + +Furnace runs on [dotnet](https://dotnet.microsoft.com/), a cross-platform, open-source platform supported on Linux, macOS, and Windows. 
+ +There are various ways in which you can run Furnace, the main ones being: [interactive notebooks](https://github.com/dotnet/interactive) supporting [Visual Studio Code](https://code.visualstudio.com/) and [Jupyter](https://jupyter.org/); running in a [REPL](https://github.com/jonsequitur/dotnet-repl); running [script files](https://docs.microsoft.com/en-us/dotnet/fsharp/tools/fsharp-interactive/); and [compiling, packing, and publishing](https://docs.microsoft.com/en-us/dotnet/core/introduction) performant binaries. + +## Interactive Notebooks and Scripts + +You can use Furnace in [dotnet interactive](https://github.com/dotnet/interactive) notebooks in [Visual Studio Code](https://code.visualstudio.com/) or [Jupyter](https://jupyter.org/), or in F# scripts (`.fsx` files), by referencing the package as follows: + + // Use one of the following three lines + #r "nuget: Furnace-cpu" // Use the latest version + #r "nuget: Furnace-cpu, *-*" // Use the latest pre-release version + #r "nuget: Furnace-cpu, 1.0.1" // Use a specific version + + open Furnace + +*) +
+ +(** +## Dotnet Applications + +You can add Furnace to your dotnet application using the [dotnet](https://dotnet.microsoft.com/) command-line interface (CLI). + +For example, the following creates a new F# console application and adds the latest pre-release version of the `Furnace-cpu` package as a dependency. + + dotnet new console -lang "F#" -o src/app + cd src/app + dotnet add package --prerelease Furnace-cpu + dotnet run + +## Packages + +We provide several package bundles for a variety of use cases. + +* [Furnace-cpu](https://www.nuget.org/packages/Furnace-cpu)
+Includes LibTorch CPU binaries for Linux, macOS, and Windows. + +* [Furnace-cuda-linux](https://www.nuget.org/packages/Furnace-cuda-linux) / [Furnace-cuda-windows](https://www.nuget.org/packages/Furnace-cuda-windows)
+Include LibTorch CPU and CUDA GPU binaries for Linux and Windows. Large download. + +* [Furnace-lite](https://www.nuget.org/packages/Furnace-lite)
+Includes the Torch backend but not the LibTorch binaries. + +### Using local LibTorch binaries (optional) + +You can combine the `Furnace-lite` package bundle with existing local native binaries of LibTorch for your OS (Linux, Mac, or Windows) installed through other means. + +LibTorch is the main tensor computation core implemented in C++/CUDA and it is used by PyTorch in Python and by other projects in various programming languages. The following are two common ways of having LibTorch in your system. + +* If you use Python and have [PyTorch](https://pytorch.org/) installed, this comes with LibTorch as a part of the PyTorch distribution. If your GPU works in this PyTorch installation without any issues, it will also work in Furnace. + +* You can download the native LibTorch package without Python by following the [get started](https://pytorch.org/get-started/locally/) instructions in the PyTorch website, and extracting the downloaded archive to a folder in your system. + +Before using the `Torch` backend in Furnace, you will have to add an explicit load of the LibTorch native library, which you can do as follows. In order to find the location of LibTorch binaries, searching for `libtorch.so` in your system might be helpful. Note that this file is called `libtorch.so` in Linux, `libtorch.dylib` in macOS, and `torch.dll` in Windows. + + open System.Runtime.InteropServices + NativeLibrary.Load("/home/user/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so") + + +## Backends and Devices + +Furnace currently provides two computation backends. + +* The `Torch` backend is the default and recommended backend based on [LibTorch](https://pytorch.org/cppdocs/), using the same C++ and CUDA implementations for tensor computations that power [PyTorch](https://pytorch.org/). On top of these raw tensors (LibTorch's ATen, excluding autograd), Furnace implements its own computation graph and differentiation capabilities. 
This backend requires platform-specific binaries of LibTorch, which we provide and test on Linux, macOS, and Windows. + + +* The `Reference` backend is implemented purely in F# and can run on any hardware platform where [dotnet](https://dotnet.microsoft.com/) can run (for example iOS, Android, Raspberry Pi). This backend has reasonable performance for use cases dominated by scalar and small tensor operations, and is not recommended for use cases involving large tensor operations (such as machine learning). This backend is always available. + + +### Configuration of Default Backend, Device, and Tensor Type + +Selection of the default backend, device, and tensor type is done using [FurnaceImage.config](https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#config). + +* [Dtype](https://fsprojects.github.io/Furnace/reference/furnace-dtype.html) choices available: `BFloat16`, `Bool`, `Byte`, `Float16`, `Float32`, `Float64`, `Int16`, `Int32`, `Int64`, `Int8` + + +* [Device](https://fsprojects.github.io/Furnace/reference/furnace-device.html) choices available: `CPU`, `GPU` + + +* [Backend](https://fsprojects.github.io/Furnace/reference/furnace-backend.html) choices available: `Reference`, `Torch` + + +For example, the following selects the `Torch` backend with single precision tensors as the default tensor type and GPU (CUDA) execution. + +*) +open Furnace + +FurnaceImage.config(dtype=Dtype.Float32, device=Device.GPU, backend=Backend.Torch) +(** +The following selects the `Reference` backend. + +*) +FurnaceImage.config(backend=Backend.Reference) +(** +A tensor's backend and device can be inspected as follows. + +*) +let t = FurnaceImage.tensor [ 0 .. 10 ] + +let device = t.device +let backend = t.backend +(** +Tensors can be moved between devices (for example from CPU to GPU) using [Tensor.move](https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#move). 
For example: + +*) +let t2 = t.move(Device.GPU) +(** +## Developing Furnace Libraries + +To develop libraries built on Furnace, you can use the following guideline to reference the various packages. + +* Reference `Furnace.Core` and `Furnace.Data` in your library code. + +* Reference `Furnace.Backends.Reference` in your correctness testing code. + +* Reference `Furnace.Backends.Torch` and `libtorch-cpu` in your CPU testing code. + +* Reference `Furnace.Backends.Torch` and `libtorch-cuda-linux` or `libtorch-cuda-windows` in your (optional) GPU testing code. + +*) + diff --git a/install.html b/install.html new file mode 100644 index 00000000..1d0fa4f2 --- /dev/null +++ b/install.html @@ -0,0 +1,306 @@ + + + + + Installing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+

Binder  +Binder  +Script  +Script

+

Installing

+

Furnace runs on dotnet, a cross-platform, open-source platform supported on Linux, macOS, and Windows.

+

There are various ways in which you can run Furnace, the main ones being: interactive notebooks supporting Visual Studio Code and Jupyter; running in a REPL; running script files; and compiling, packing, and publishing performant binaries.

+

Interactive Notebooks and Scripts

+

You can use Furnace in dotnet interactive notebooks in Visual Studio Code or Jupyter, or in F# scripts (.fsx files), by referencing the package as follows:

+
// Use one of the following three lines
+#r "nuget: Furnace-cpu" // Use the latest version
+#r "nuget: Furnace-cpu, *-*" // Use the latest pre-release version
+#r "nuget: Furnace-cpu, 1.0.1" // Use a specific version
+
+open Furnace
+
+
+ +

Dotnet Applications

+

You can add Furnace to your dotnet application using the dotnet command-line interface (CLI).

+

For example, the following creates a new F# console application and adds the latest pre-release version of the Furnace-cpu package as a dependency.

+
dotnet new console -lang "F#" -o src/app
+cd src/app
+dotnet add package --prerelease Furnace-cpu
+dotnet run
+
+

Packages

+

We provide several package bundles for a variety of use cases.

+ +

Using local LibTorch binaries (optional)

+

You can combine the Furnace-lite package bundle with existing local native binaries of LibTorch for your OS (Linux, Mac, or Windows) installed through other means.

+

LibTorch is the main tensor computation core implemented in C++/CUDA and it is used by PyTorch in Python and by other projects in various programming languages. The following are two common ways of having LibTorch in your system.

+
    +
  • If you use Python and have PyTorch installed, this comes with LibTorch as a part of the PyTorch distribution. If your GPU works in this PyTorch installation without any issues, it will also work in Furnace.
  • +
  • You can download the native LibTorch package without Python by following the get started instructions in the PyTorch website, and extracting the downloaded archive to a folder in your system.
  • +
+

Before using the Torch backend in Furnace, you will have to add an explicit load of the LibTorch native library, which you can do as follows. In order to find the location of LibTorch binaries, searching for libtorch.so in your system might be helpful. Note that this file is called libtorch.so in Linux, libtorch.dylib in macOS, and torch.dll in Windows.

+
open System.Runtime.InteropServices
+NativeLibrary.Load("/home/user/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so")
+
+

Backends and Devices

+

Furnace currently provides two computation backends.

+
    +
  • The Torch backend is the default and recommended backend based on LibTorch, using the same C++ and CUDA implementations for tensor computations that power PyTorch. On top of these raw tensors (LibTorch's ATen, excluding autograd), Furnace implements its own computation graph and differentiation capabilities. This backend requires platform-specific binaries of LibTorch, which we provide and test on Linux, macOS, and Windows.

  • +
  • The Reference backend is implemented purely in F# and can run on any hardware platform where dotnet can run (for example iOS, Android, Raspberry Pi). This backend has reasonable performance for use cases dominated by scalar and small tensor operations, and is not recommended for use cases involving large tensor operations (such as machine learning). This backend is always available.

  • +
+

Configuration of Default Backend, Device, and Tensor Type

+

Selection of the default backend, device, and tensor type is done using FurnaceImage.config.

+
    +
  • Dtype choices available: BFloat16, Bool, Byte, Float16, Float32, Float64, Int16, Int32, Int64, Int8

  • +
  • Device choices available: CPU, GPU

  • +
  • Backend choices available: Reference, Torch

  • +
+

For example, the following selects the Torch backend with single precision tensors as the default tensor type and GPU (CUDA) execution.

+
open Furnace
+
+FurnaceImage.config(dtype=Dtype.Float32, device=Device.GPU, backend=Backend.Torch)
+
+

The following selects the Reference backend.

+
FurnaceImage.config(backend=Backend.Reference)
+
+

A tensor's backend and device can be inspected as follows.

+
let t = FurnaceImage.tensor [ 0 .. 10 ]
+
+let device = t.device
+let backend = t.backend
+
+

Tensors can be moved between devices (for example from CPU to GPU) using Tensor.move. For example:

+
let t2 = t.move(Device.GPU)
+
+

Developing Furnace Libraries

+

To develop libraries built on Furnace, you can use the following guideline to reference the various packages.

+
    +
  • Reference Furnace.Core and Furnace.Data in your library code.
  • +
  • Reference Furnace.Backends.Reference in your correctness testing code.
  • +
  • Reference Furnace.Backends.Torch and libtorch-cpu in your CPU testing code.
  • +
  • Reference Furnace.Backends.Torch and libtorch-cuda-linux or libtorch-cuda-windows in your (optional) GPU testing code.
  • +
+ +
namespace Furnace
+
type FurnaceImage = + static member abs: input: Tensor -> Tensor + static member acos: input: Tensor -> Tensor + static member add: a: Tensor * b: Tensor -> Tensor + static member arange: endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member arangeLike: input: Tensor * endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member argmax: input: Tensor -> int[] + 1 overload + static member argmin: input: Tensor -> int[] + 1 overload + static member asin: input: Tensor -> Tensor + static member atan: input: Tensor -> Tensor + static member backends: unit -> Backend list + ...
<summary> + Tensor operations +</summary>
+
static member Furnace.FurnaceImage.config: unit -> Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer
static member Furnace.FurnaceImage.config: configuration: (Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer) -> unit
static member Furnace.FurnaceImage.config: ?device: Furnace.Device * ?dtype: Furnace.Dtype * ?backend: Furnace.Backend * ?printer: Furnace.Printer -> unit
+
Multiple items
module Backend + +from Furnace
<summary> + Contains functions and settings related to backend specifications. +</summary>

--------------------
type Backend = + | Reference + | Torch + | Other of name: string * code: int + override ToString: unit -> string + member Name: string
<summary> + Represents a backend for Furnace tensors +</summary>
+
union case Furnace.Backend.Reference: Furnace.Backend
<summary> + The reference backend +</summary>
+
static member Furnace.FurnaceImage.seed: ?seed: int -> unit
+
namespace System
+
namespace System.Runtime
+
namespace System.Runtime.InteropServices
+
static member FurnaceImage.config: unit -> Device * Dtype * Backend * Printer
static member FurnaceImage.config: configuration: (Device * Dtype * Backend * Printer) -> unit
static member FurnaceImage.config: ?device: Device * ?dtype: Dtype * ?backend: Backend * ?printer: Printer -> unit
+
Multiple items
module Dtype + +from Furnace
<summary> + Contains functions and settings related to tensor element types +</summary>

--------------------
[<Struct>] +type Dtype = + | BFloat16 + | Float16 + | Float32 + | Float64 + | Int8 + | Byte + | Int16 + | Int32 + | Int64 + | Bool + override ToString: unit -> string + member SummationType: Dtype
<summary> + Represents a storage type for elements of a tensor +</summary>
+
union case Dtype.Float32: Dtype
<summary> + Store elements as 32-bit floating point numbers +</summary>
+
Multiple items
union case Device.Device: DeviceType * int -> Device

--------------------
module Device + +from Furnace
<summary> + Contains functions and settings related to device specifications. +</summary>

--------------------
[<Struct>] +type Device = + | Device of DeviceType * int + override ToString: unit -> string + member DeviceIndex: int + member DeviceType: DeviceType + static member CPU: Device + static member GPU: Device
<summary> + Represents a device specification. +</summary>
+
property Device.GPU: Device with get
+
union case Backend.Torch: Backend
<summary> + The LibTorch backend +</summary>
+
union case Backend.Reference: Backend
<summary> + The reference backend +</summary>
+
val t: Tensor
+
static member FurnaceImage.tensor: value: obj * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor
+
val device: Device
+
val backend: Backend
+
val t2: Tensor
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/install.ipynb b/install.ipynb new file mode 100644 index 00000000..26e7f274 --- /dev/null +++ b/install.ipynb @@ -0,0 +1,219 @@ + + { + "cells": [ + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n", +"// !bash \u003c(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Import Furnace package\n", +"#r \"nuget: Furnace-lite,1.0.8\"\n", +"\n", +"// Set dotnet interactive formatter to plaintext\n", +"Formatter.SetPreferredMimeTypesFor(typeof\u003cobj\u003e, \"text/plain\")\n", +"Formatter.Register(fun (x:obj) (writer: TextWriter) -\u003e fprintfn writer \"%120A\" x )\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/install.ipynb)\u0026emsp;\n", +"[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath=install.ipynb)\u0026emsp;\n", +"[![Script](img/badge-script.svg)](install.fsx)\u0026emsp;\n", +"[![Script](img/badge-notebook.svg)](install.ipynb)\n", +"\n", +"# Installing\n", +"\n", +"Furnace runs on [dotnet](https://dotnet.microsoft.com/), a cross-platform, open-source platform supported on Linux, macOS, and Windows.\n", +"\n", +"There are various ways in which you can run Furnace, the main ones being: [interactive notebooks](https://github.com/dotnet/interactive) supporting [Visual Studio Code](https://code.visualstudio.com/) and [Jupyter](https://jupyter.org/); running in a [REPL](https://github.com/jonsequitur/dotnet-repl); running [script files](https://docs.microsoft.com/en-us/dotnet/fsharp/tools/fsharp-interactive/); 
and [compiling, packing, and publishing](https://docs.microsoft.com/en-us/dotnet/core/introduction) performant binaries.\n", +"\n", +"## Interactive Notebooks and Scripts\n", +"\n", +"You can use Furnace in [dotnet interactive](https://github.com/dotnet/interactive) notebooks in [Visual Studio Code](https://code.visualstudio.com/) or [Jupyter](https://jupyter.org/), or in F# scripts (`.fsx` files), by referencing the package as follows:\n", +"\n", +" // Use one of the following three lines\n", +" #r \"nuget: Furnace-cpu\" // Use the latest version\n", +" #r \"nuget: Furnace-cpu, *-*\" // Use the latest pre-release version\n", +" #r \"nuget: Furnace-cpu, 1.0.1\" // Use a specific version\n", +"\n", +" open Furnace\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["\u003c/br\u003e\n", +"\u003cimg src=\"img/anim-intro-1.gif\" width=\"85%\" /\u003e\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["## Dotnet Applications\n", +"\n", +"You can add Furnace to your dotnet application using the [dotnet](https://dotnet.microsoft.com/) command-line interface (CLI).\n", +"\n", +"For example, the following creates a new F# console application and adds the latest pre-release version of the `Furnace-cpu` package as a dependency.\n", +"\n", +" dotnet new console -lang \"F#\" -o src/app\n", +" cd src/app\n", +" dotnet add package --prerelease Furnace-cpu\n", +" dotnet run\n", +"\n", +"## Packages\n", +"\n", +"We provide several package bundles for a variety of use cases.\n", +"\n", +"* [Furnace-cpu](https://www.nuget.org/packages/Furnace-cpu)\u003c/br\u003e\n", +"Includes LibTorch CPU binaries for Linux, macOS, and Windows.\n", +"\n", +"* [Furnace-cuda-linux](https://www.nuget.org/packages/Furnace-cuda-linux) / [Furnace-cuda-windows](https://www.nuget.org/packages/Furnace-cuda-windows)\u003c/br\u003e\n", +"Include LibTorch CPU and CUDA GPU binaries for Linux and Windows. 
Large download.\n", +"\n", +"* [Furnace-lite](https://www.nuget.org/packages/Furnace-lite)\u003c/br\u003e\n", +"Includes the Torch backend but not the LibTorch binaries.\n", +"\n", +"### Using local LibTorch binaries (optional)\n", +"\n", +"You can combine the `Furnace-lite` package bundle with existing local native binaries of LibTorch for your OS (Linux, Mac, or Windows) installed through other means.\n", +"\n", +"LibTorch is the main tensor computation core implemented in C++/CUDA and it is used by PyTorch in Python and by other projects in various programming languages. The following are two common ways of having LibTorch in your system.\n", +"\n", +"* If you use Python and have [PyTorch](https://pytorch.org/) installed, this comes with LibTorch as a part of the PyTorch distribution. If your GPU works in this PyTorch installation without any issues, it will also work in Furnace.\n", +"\n", +"* You can download the native LibTorch package without Python by following the [get started](https://pytorch.org/get-started/locally/) instructions in the PyTorch website, and extracting the downloaded archive to a folder in your system.\n", +"\n", +"Before using the `Torch` backend in Furnace, you will have to add an explicit load of the LibTorch native library, which you can do as follows. In order to find the location of LibTorch binaries, searching for `libtorch.so` in your system might be helpful. 
Note that this file is called `libtorch.so` in Linux, `libtorch.dylib` in macOS, and `torch.dll` in Windows.\n", +"\n", +" open System.Runtime.InteropServices\n", +" NativeLibrary.Load(\"/home/user/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so\")\n", +"\n", +"\n", +"## Backends and Devices\n", +"\n", +"Furnace currently provides two computation backends.\n", +"\n", +"* The `Torch` backend is the default and recommended backend based on [LibTorch](https://pytorch.org/cppdocs/), using the same C++ and CUDA implementations for tensor computations that power [PyTorch](https://pytorch.org/). On top of these raw tensors (LibTorch\u0027s ATen, excluding autograd), Furnace implements its own computation graph and differentiation capabilities. This backend requires platform-specific binaries of LibTorch, which we provide and test on Linux, macOS, and Windows.\n", +" \n", +"\n", +"* The `Reference` backend is implemented purely in F# and can run on any hardware platform where [dotnet](https://dotnet.microsoft.com/) can run (for example iOS, Android, Raspberry Pi). This backend has reasonable performance for use cases dominated by scalar and small tensor operations, and is not recommended for use cases involving large tensor operations (such as machine learning). 
This backend is always available.\n", +" \n", +"\n", +"### Configuration of Default Backend, Device, and Tensor Type\n", +"\n", +"Selection of the default backend, device, and tensor type is done using [FurnaceImage.config](https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html#config).\n", +"\n", +"* [Dtype](https://fsprojects.github.io/Furnace/reference/furnace-dtype.html) choices available: `BFloat16`, `Bool`, `Byte`, `Float16`, `Float32`, `Float64`, `Int16`, `Int32`, `Int64`, `Int8`\n", +" \n", +"\n", +"* [Device](https://fsprojects.github.io/Furnace/reference/furnace-device.html) choices available: `CPU`, `GPU`\n", +" \n", +"\n", +"* [Backend](https://fsprojects.github.io/Furnace/reference/furnace-backend.html) choices available: `Reference`, `Torch`\n", +" \n", +"\n", +"For example, the following selects the `Torch` backend with single precision tensors as the default tensor type and GPU (CUDA) execution.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 2, "outputs": [], + "source": ["open Furnace\n", +"\n", +"FurnaceImage.config(dtype=Dtype.Float32, device=Device.GPU, backend=Backend.Torch)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["The following selects the `Reference` backend.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 3, "outputs": [], + "source": ["FurnaceImage.config(backend=Backend.Reference)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["A tensor\u0027s backend and device can be inspected as follows.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 4, "outputs": [], + "source": ["let t = FurnaceImage.tensor [ 0 .. 
10 ]\n", +"\n", +"let device = t.device\n", +"let backend = t.backend\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["Tensors can be moved between devices (for example from CPU to GPU) using [Tensor.move](https://fsprojects.github.io/Furnace/reference/furnace-tensor.html#move). For example:\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 5, "outputs": [], + "source": ["let t2 = t.move(Device.GPU)\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["## Developing Furnace Libraries\n", +"\n", +"To develop libraries built on Furnace, you can use the following guideline to reference the various packages.\n", +"\n", +"* Reference `Furnace.Core` and `Furnace.Data` in your library code.\n", +"\n", +"* Reference `Furnace.Backends.Reference` in your correctness testing code.\n", +"\n", +"* Reference `Furnace.Backends.Torch` and `libtorch-cpu` in your CPU testing code.\n", +"\n", +"* Reference `Furnace.Backends.Torch` and `libtorch-cuda-linux` or `libtorch-cuda-windows` in your (optional) GPU testing code.\n", +"\n"] + }], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/models.fsx b/models.fsx new file mode 100644 index 00000000..81dab56f --- /dev/null +++ b/models.fsx @@ -0,0 +1,12 @@ +(** +Test + +*) +open Furnace + +FurnaceImage.config(backend=Backend.Reference) + +let a = FurnaceImage.tensor([1,2,3]) +printfn "%A" a(* output: +*) + diff --git a/models.html b/models.html new file mode 100644 index 00000000..490b75ad --- /dev/null +++ b/models.html @@ -0,0 +1,175 @@ + + + + + models + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+

Test

+
open Furnace
+
+FurnaceImage.config(backend=Backend.Reference)
+
+let a = FurnaceImage.tensor([1,2,3])
+printfn "%A" a
+
+
+ +
namespace Furnace
+
type FurnaceImage = + static member abs: input: Tensor -> Tensor + static member acos: input: Tensor -> Tensor + static member add: a: Tensor * b: Tensor -> Tensor + static member arange: endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member arangeLike: input: Tensor * endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member argmax: input: Tensor -> int[] + 1 overload + static member argmin: input: Tensor -> int[] + 1 overload + static member asin: input: Tensor -> Tensor + static member atan: input: Tensor -> Tensor + static member backends: unit -> Backend list + ...
<summary> + Tensor operations +</summary>
+
static member Furnace.FurnaceImage.config: unit -> Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer
static member Furnace.FurnaceImage.config: configuration: (Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer) -> unit
static member Furnace.FurnaceImage.config: ?device: Furnace.Device * ?dtype: Furnace.Dtype * ?backend: Furnace.Backend * ?printer: Furnace.Printer -> unit
+
Multiple items
module Backend + +from Furnace
<summary> + Contains functions and settings related to backend specifications. +</summary>

--------------------
type Backend = + | Reference + | Torch + | Other of name: string * code: int + override ToString: unit -> string + member Name: string
<summary> + Represents a backend for Furnace tensors +</summary>
+
union case Furnace.Backend.Reference: Furnace.Backend
<summary> + The reference backend +</summary>
+
static member Furnace.FurnaceImage.seed: ?seed: int -> unit
+
static member FurnaceImage.config: unit -> Device * Dtype * Backend * Printer
static member FurnaceImage.config: configuration: (Device * Dtype * Backend * Printer) -> unit
static member FurnaceImage.config: ?device: Device * ?dtype: Dtype * ?backend: Backend * ?printer: Printer -> unit
+
union case Backend.Reference: Backend
<summary> + The reference backend +</summary>
+
val a: Tensor
+
static member FurnaceImage.tensor: value: obj * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor
+
val printfn: format: Printf.TextWriterFormat<'T> -> 'T
<summary>Print to <c>stdout</c> using the given format, and add a newline.</summary>
<param name="format">The formatter.</param>
<returns>The formatted result.</returns>
<example>See <c>Printf.printfn</c> (link: <see cref="M:Microsoft.FSharp.Core.PrintfModule.PrintFormatLine``1" />) for examples.</example>
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/models.ipynb b/models.ipynb new file mode 100644 index 00000000..d6c0065c --- /dev/null +++ b/models.ipynb @@ -0,0 +1,45 @@ + + { + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["Test\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 2, "outputs": [ + { + "data": { + "text/plain": [""] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["open Furnace\n", +"\n", +"FurnaceImage.config(backend=Backend.Reference)\n", +"\n", +"let a = FurnaceImage.tensor([1,2,3])\n", +"printfn \"%A\" a\n"] + }], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/nested-derivatives.fsx b/nested-derivatives.fsx new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/nested-derivatives.fsx @@ -0,0 +1,2 @@ + + diff --git a/nested-derivatives.html b/nested-derivatives.html new file mode 100644 index 00000000..b59d49b2 --- /dev/null +++ b/nested-derivatives.html @@ -0,0 +1,127 @@ + + + + + nested-derivatives + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/nested-derivatives.ipynb b/nested-derivatives.ipynb new file mode 100644 index 00000000..08fd20d5 --- /dev/null +++ b/nested-derivatives.ipynb @@ -0,0 +1,18 @@ + + { + "cells": [], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/optimization.fsx b/optimization.fsx new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/optimization.fsx @@ -0,0 +1,2 @@ + + diff --git a/optimization.html b/optimization.html new file mode 100644 index 00000000..fcca89a5 --- /dev/null +++ b/optimization.html @@ -0,0 +1,127 @@ + + + + + optimization + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/optimization.ipynb b/optimization.ipynb new file mode 100644 index 00000000..08fd20d5 --- /dev/null +++ b/optimization.ipynb @@ -0,0 +1,18 @@ + + { + "cells": [], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/probability-distributions.fsx b/probability-distributions.fsx new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/probability-distributions.fsx @@ -0,0 +1,2 @@ + + diff --git a/probability-distributions.html b/probability-distributions.html new file mode 100644 index 00000000..7d76b048 --- /dev/null +++ b/probability-distributions.html @@ -0,0 +1,127 @@ + + + + + probability-distributions + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/probability-distributions.ipynb b/probability-distributions.ipynb new file mode 100644 index 00000000..08fd20d5 --- /dev/null +++ b/probability-distributions.ipynb @@ -0,0 +1,18 @@ + + { + "cells": [], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/quickstart.fsx b/quickstart.fsx new file mode 100644 index 00000000..6bd413ed --- /dev/null +++ b/quickstart.fsx @@ -0,0 +1,199 @@ +#r "nuget: Furnace-lite,1.0.8" +#r "nuget: SixLabors.ImageSharp,1.0.1" +(** +[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/quickstart.ipynb)  +[![Script](img/badge-script.svg)](quickstart.fsx)  +[![Script](img/badge-notebook.svg)](quickstart.ipynb) + +# Quickstart + +Here we cover some key tasks involved in a typical machine learning pipeline and how these can be implemented with Furnace. Note that a significant part of Furnace's design has been influenced by [PyTorch](https://pytorch.org/) and you would feel mostly at home if you have familiarity with PyTorch. + +## Datasets and Data Loaders + +Furnace provides the [Dataset](https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html) type that represents a data source and the [DataLoader](https://fsprojects.github.io/Furnace/reference/furnace-data-dataloader.html) type that handles the loading of data from datasets and iterating over [minibatches](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Iterative_method) of data. + +See the [Furnace.Data](/Furnace/reference/furnace-data.html) namespace for the full API reference. 
+ +### Datasets + +Furnace has ready-to-use types that cover main datasets typically used in machine learning, such as [MNIST](https://fsprojects.github.io/Furnace/reference/furnace-data-mnist.html), [CIFAR10](https://fsprojects.github.io/Furnace/reference/furnace-data-cifar10.html), [CIFAR100](https://fsprojects.github.io/Furnace/reference/furnace-data-cifar100.html), and also more generic dataset types such as [TensorDataset](https://fsprojects.github.io/Furnace/reference/furnace-data-tensordataset.html) or [ImageDataset](https://fsprojects.github.io/Furnace/reference/furnace-data-imagedataset.html). + +The following loads the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset and shows one image entry and the corresponding label. + +*) +open Furnace +open Furnace.Data + +// First ten images in MNIST training set +let dataset = MNIST("../data", train=true, transform=id, n=10) + +// Inspect a single image and label +let data, label = dataset[7] + +// Save image to file +data.saveImage("test.png")(* output: +No value returned by any evaluator*) +// Inspect data as ASCII and show label +printfn "Data: %A\nLabel: %A" (data.toImageString()) label(* output: +Data: " + + + + + ~-}@#####Z + -j*W########J' + O############i + [##Mxxxxo####i + ::^ 'W##Z + |&##f + (o###' + (q%###d. + "uaaa####8}: + _m########O + _*####@####? + "v<____f##? + `##? + |##? + ?. 1&##? + iQ#: `)8##&! + p##txxxxb###o\ + p#########MC. + +J#####wdt_ + }B#Z}^ + + + +" +Label: tensor(3,dtype=Int32,device=cpu-1)*) +(** +### Data Loaders + +A data loader handles tasks such as constructing minibatches from an underlying dataset on-the-fly, shuffling the data, and moving the data tensors between devices. In the example below we show a single batch of six MNIST images and their corresponding classification labels. 
+ +*) +let loader = DataLoader(dataset, shuffle=true, batchSize=6) +let batch, labels = loader.batch() + +printfn "%A\nLabels: %A" (batch.toImageString()) labels(* output: +*) +(** +In practice a data loader is typically used to iterate over all minibatches in a given dataset in order to feed each minibatch through a machine learning model. One full iteration over the dataset would be called an "epoch". Typically you would perform multiple such epochs of iterations during the training of a model. + +*) +for epoch = 1 to 10 do + for i, data, labels in loader.epoch() do + printfn "Epoch %A, minibatch %A" epoch (i+1) + // Process the minibatch + // ... +(** +## Models + +Many machine learning models are differentiable functions whose parameters can be tuned via [gradient-based optimization](https://en.wikipedia.org/wiki/Gradient_descent), finding an optimum for an objective function that quantifies the fit of the model to a given set of data. These models are typically built as compositions non-linear functions and ready-to-use building blocks such as linear, recurrent, and convolutional layers. + +Furnace provides the most commonly used model building blocks including convolutions, transposed convolutions, batch normalization, dropout, recurrent and other architectures. + +See the [Furnace.Model](/Furnace/reference/furnace-model.html) namespace for the full API reference. + +### Constructing models, PyTorch style + +If you have experience with [PyTorch](https://pytorch.org/), you would find the following way of model definition familiar. Let's look at an example of a [generative adversarial network (GAN)](https://arxiv.org/abs/1406.2661) architecture. 
+ +*) +open Furnace.Model +open Furnace.Compose + +// PyTorch style + +// Define a model class inheriting the base +type Generator(nz: int) = + inherit Model() + let fc1 = Linear(nz, 256) + let fc2 = Linear(256, 512) + let fc3 = Linear(512, 1024) + let fc4 = Linear(1024, 28*28) + do base.addModel(fc1, fc2, fc3, fc4) + override self.forward(x) = + x + |> FurnaceImage.view([-1;nz]) + |> fc1.forward + |> FurnaceImage.leakyRelu(0.2) + |> fc2.forward + |> FurnaceImage.leakyRelu(0.2) + |> fc3.forward + |> FurnaceImage.leakyRelu(0.2) + |> fc4.forward + |> FurnaceImage.tanh + +// Define a model class inheriting the base +type Discriminator(nz:int) = + inherit Model() + let fc1 = Linear(28*28, 1024) + let fc2 = Linear(1024, 512) + let fc3 = Linear(512, 256) + let fc4 = Linear(256, 1) + do base.addModel(fc1, fc2, fc3, fc4) + override self.forward(x) = + x + |> FurnaceImage.view([-1;28*28]) + |> fc1.forward + |> FurnaceImage.leakyRelu(0.2) + |> FurnaceImage.dropout(0.3) + |> fc2.forward + |> FurnaceImage.leakyRelu(0.2) + |> FurnaceImage.dropout(0.3) + |> fc3.forward + |> FurnaceImage.leakyRelu(0.2) + |> FurnaceImage.dropout(0.3) + |> fc4.forward + |> FurnaceImage.sigmoid + +// Instantiate the defined classes +let nz = 128 +let gen = Generator(nz) +let dis = Discriminator(nz) + +print gen +print dis(* output: +*) +(** +### Constructing models, Furnace style + +A key advantage of Furnace lies in the [functional programming](https://en.wikipedia.org/wiki/Functional_programming) paradigm enabled by the F# language, where functions are first-class citizens, many algorithms can be constructed by applying and composing functions, and differentiation operations can be expressed as composable [higher-order functions](https://en.wikipedia.org/wiki/Higher-order_function). 
This allows very succinct (and beautiful) machine learning code to be expressed as a powerful combination of [lambda calculus](https://en.wikipedia.org/wiki/Lambda_calculus) and [differential calculus](https://en.wikipedia.org/wiki/Differential_calculus). + +For example, the following constructs the same GAN architecture (that we constructed in PyTorch style in the previous section) using Furnace's `-->` composition operator, which allows you to seamlessly compose `Model` instances and differentiable `Tensor->Tensor` functions. + +*) +// Furnace style + +// Model as a composition of models and Tensor->Tensor functions +let generator = + FurnaceImage.view([-1;nz]) + --> Linear(nz, 256) + --> FurnaceImage.leakyRelu(0.2) + --> Linear(256, 512) + --> FurnaceImage.leakyRelu(0.2) + --> Linear(512, 1024) + --> FurnaceImage.leakyRelu(0.2) + --> Linear(1024, 28*28) + --> FurnaceImage.tanh + +// Model as a composition of models and Tensor->Tensor functions +let discriminator = + FurnaceImage.view([-1; 28*28]) + --> Linear(28*28, 1024) + --> FurnaceImage.leakyRelu(0.2) + --> FurnaceImage.dropout(0.3) + --> Linear(1024, 512) + --> FurnaceImage.leakyRelu(0.2) + --> FurnaceImage.dropout(0.3) + --> Linear(512, 256) + --> FurnaceImage.leakyRelu(0.2) + --> FurnaceImage.dropout(0.3) + --> Linear(256, 1) + --> FurnaceImage.sigmoid + +print generator +print discriminator(* output: +*) + diff --git a/quickstart.html b/quickstart.html new file mode 100644 index 00000000..b2882a34 --- /dev/null +++ b/quickstart.html @@ -0,0 +1,416 @@ + + + + + Quickstart + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+

Binder  +Script  +Script

+

Quickstart

+

Here we cover some key tasks involved in a typical machine learning pipeline and how these can be implemented with Furnace. Note that a significant part of Furnace's design has been influenced by PyTorch and you would feel mostly at home if you have familiarity with PyTorch.

+

Datasets and Data Loaders

+

Furnace provides the Dataset type that represents a data source and the DataLoader type that handles the loading of data from datasets and iterating over minibatches of data.

+

See the Furnace.Data namespace for the full API reference.

+

Datasets

+

Furnace has ready-to-use types that cover main datasets typically used in machine learning, such as MNIST, CIFAR10, CIFAR100, and also more generic dataset types such as TensorDataset or ImageDataset.

+

The following loads the MNIST dataset and shows one image entry and the corresponding label.

+
open Furnace
+open Furnace.Data
+
+// First ten images in MNIST training set
+let dataset = MNIST("../data", train=true, transform=id, n=10)
+
+// Inspect a single image and label
+let data, label = dataset[7]
+
+// Save image to file
+data.saveImage("test.png")
+
+
No value returned by any evaluator
+
// Inspect data as ASCII and show label
+printfn "Data: %A\nLabel: %A" (data.toImageString()) label
+
+
Data: "                            
+                            
+                            
+                            
+                            
+           ~-}@#####Z       
+         -j*W########J'     
+         O############i     
+         [##Mxxxxo####i     
+          ::^    'W##Z      
+                 |&##f      
+                (o###'      
+              (q%###d.      
+         "uaaa####8}:       
+        _m########O         
+        _*####@####?        
+         "v<____f##?        
+                `##?        
+                |##?        
+       ?.      1&##?        
+     iQ#:    `)8##&!        
+     p##txxxxb###o\         
+     p#########MC.          
+     +J#####wdt_            
+       }B#Z}^               
+                            
+                            
+                            
+"
+Label: tensor(3,dtype=Int32,device=cpu-1)
+

Data Loaders

+

A data loader handles tasks such as constructing minibatches from an underlying dataset on-the-fly, shuffling the data, and moving the data tensors between devices. In the example below we show a single batch of six MNIST images and their corresponding classification labels.

+
let loader = DataLoader(dataset, shuffle=true, batchSize=6)
+let batch, labels = loader.batch()
+
+printfn "%A\nLabels: %A" (batch.toImageString()) labels
+
+
+

In practice a data loader is typically used to iterate over all minibatches in a given dataset in order to feed each minibatch through a machine learning model. One full iteration over the dataset would be called an "epoch". Typically you would perform multiple such epochs of iterations during the training of a model.

+
for epoch = 1 to 10 do
+    for i, data, labels in loader.epoch() do
+        printfn "Epoch %A, minibatch %A" epoch (i+1)
+        // Process the minibatch
+        // ...
+
+

Models

+

Many machine learning models are differentiable functions whose parameters can be tuned via gradient-based optimization, finding an optimum for an objective function that quantifies the fit of the model to a given set of data. These models are typically built as compositions of non-linear functions and ready-to-use building blocks such as linear, recurrent, and convolutional layers.

+

Furnace provides the most commonly used model building blocks including convolutions, transposed convolutions, batch normalization, dropout, recurrent and other architectures.

+

See the Furnace.Model namespace for the full API reference.

+

Constructing models, PyTorch style

+

If you have experience with PyTorch, you would find the following way of model definition familiar. Let's look at an example of a generative adversarial network (GAN) architecture.

+
open Furnace.Model
+open Furnace.Compose
+
+// PyTorch style
+
+// Define a model class inheriting the base
+type Generator(nz: int) =
+    inherit Model()
+    let fc1 = Linear(nz, 256)
+    let fc2 = Linear(256, 512)
+    let fc3 = Linear(512, 1024)
+    let fc4 = Linear(1024, 28*28)
+    do base.addModel(fc1, fc2, fc3, fc4)
+    override self.forward(x) =
+        x
+        |> FurnaceImage.view([-1;nz])
+        |> fc1.forward
+        |> FurnaceImage.leakyRelu(0.2)
+        |> fc2.forward
+        |> FurnaceImage.leakyRelu(0.2)
+        |> fc3.forward
+        |> FurnaceImage.leakyRelu(0.2)
+        |> fc4.forward
+        |> FurnaceImage.tanh
+
+// Define a model class inheriting the base
+type Discriminator(nz:int) =
+    inherit Model()
+    let fc1 = Linear(28*28, 1024)
+    let fc2 = Linear(1024, 512)
+    let fc3 = Linear(512, 256)
+    let fc4 = Linear(256, 1)
+    do base.addModel(fc1, fc2, fc3, fc4)
+    override self.forward(x) =
+        x
+        |> FurnaceImage.view([-1;28*28])
+        |> fc1.forward
+        |> FurnaceImage.leakyRelu(0.2)
+        |> FurnaceImage.dropout(0.3)
+        |> fc2.forward
+        |> FurnaceImage.leakyRelu(0.2)
+        |> FurnaceImage.dropout(0.3)
+        |> fc3.forward
+        |> FurnaceImage.leakyRelu(0.2)
+        |> FurnaceImage.dropout(0.3)
+        |> fc4.forward
+        |> FurnaceImage.sigmoid
+
+// Instantiate the defined classes
+let nz = 128
+let gen = Generator(nz)
+let dis = Discriminator(nz)
+
+print gen
+print dis
+
+
+

Constructing models, Furnace style

+

A key advantage of Furnace lies in the functional programming paradigm enabled by the F# language, where functions are first-class citizens, many algorithms can be constructed by applying and composing functions, and differentiation operations can be expressed as composable higher-order functions. This allows very succinct (and beautiful) machine learning code to be expressed as a powerful combination of lambda calculus and differential calculus.

+

For example, the following constructs the same GAN architecture (that we constructed in PyTorch style in the previous section) using Furnace's --> composition operator, which allows you to seamlessly compose Model instances and differentiable Tensor->Tensor functions.

+
// Furnace style
+
+// Model as a composition of models and Tensor->Tensor functions
+let generator =
+    FurnaceImage.view([-1;nz])
+    --> Linear(nz, 256)
+    --> FurnaceImage.leakyRelu(0.2)
+    --> Linear(256, 512)
+    --> FurnaceImage.leakyRelu(0.2)
+    --> Linear(512, 1024)
+    --> FurnaceImage.leakyRelu(0.2)
+    --> Linear(1024, 28*28)
+    --> FurnaceImage.tanh
+
+// Model as a composition of models and Tensor->Tensor functions
+let discriminator =
+    FurnaceImage.view([-1; 28*28])
+    --> Linear(28*28, 1024)
+    --> FurnaceImage.leakyRelu(0.2)
+    --> FurnaceImage.dropout(0.3)
+    --> Linear(1024, 512)
+    --> FurnaceImage.leakyRelu(0.2)
+    --> FurnaceImage.dropout(0.3)
+    --> Linear(512, 256)
+    --> FurnaceImage.leakyRelu(0.2)
+    --> FurnaceImage.dropout(0.3)
+    --> Linear(256, 1)
+    --> FurnaceImage.sigmoid
+
+print generator
+print discriminator
+
+
+ +
namespace Furnace
+
type FurnaceImage = + static member abs: input: Tensor -> Tensor + static member acos: input: Tensor -> Tensor + static member add: a: Tensor * b: Tensor -> Tensor + static member arange: endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member arangeLike: input: Tensor * endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member argmax: input: Tensor -> int[] + 1 overload + static member argmin: input: Tensor -> int[] + 1 overload + static member asin: input: Tensor -> Tensor + static member atan: input: Tensor -> Tensor + static member backends: unit -> Backend list + ...
<summary> + Tensor operations +</summary>
+
static member Furnace.FurnaceImage.config: unit -> Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer
static member Furnace.FurnaceImage.config: configuration: (Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer) -> unit
static member Furnace.FurnaceImage.config: ?device: Furnace.Device * ?dtype: Furnace.Dtype * ?backend: Furnace.Backend * ?printer: Furnace.Printer -> unit
+
Multiple items
module Backend + +from Furnace
<summary> + Contains functions and settings related to backend specifications. +</summary>

--------------------
type Backend = + | Reference + | Torch + | Other of name: string * code: int + override ToString: unit -> string + member Name: string
<summary> + Represents a backend for Furnace tensors +</summary>
+
union case Furnace.Backend.Reference: Furnace.Backend
<summary> + The reference backend +</summary>
+
static member Furnace.FurnaceImage.seed: ?seed: int -> unit
+
namespace Furnace.Util
+
namespace Furnace.Data
+
val dataset: MNIST
+
Multiple items
type MNIST = + inherit Dataset + new: path: string * ?urls: seq<string> * ?train: bool * ?transform: (Tensor -> Tensor) * ?targetTransform: (Tensor -> Tensor) * ?n: int -> MNIST + override item: i: int -> Tensor * Tensor + member classNames: string[] + member classes: int + override length: int

--------------------
new: path: string * ?urls: seq<string> * ?train: bool * ?transform: (Tensor -> Tensor) * ?targetTransform: (Tensor -> Tensor) * ?n: int -> MNIST
+
val id: x: 'T -> 'T
<summary>The identity function</summary>
<param name="x">The input value.</param>
<returns>The same value.</returns>
<example id="id-example"><code lang="fsharp"> + id 12 // Evaluates to 12 + id "abc" // Evaluates to "abc" + </code></example>
+
argument n: int option
+
val data: Tensor
+
val label: Tensor
+
val pngToHtml: fileName: string -> widthPixels: int -> string
<summary> + Given a PNG image file name, returns an HTML image element with the image content included as a Base64 encoded string +</summary>
+
val printfn: format: Printf.TextWriterFormat<'T> -> 'T
<summary>Print to <c>stdout</c> using the given format, and add a newline.</summary>
<param name="format">The formatter.</param>
<returns>The formatted result.</returns>
<example>See <c>Printf.printfn</c> (link: <see cref="M:Microsoft.FSharp.Core.PrintfModule.PrintFormatLine``1" />) for examples.</example>
+
val loader: DataLoader
+
Multiple items
type DataLoader = + new: dataset: Dataset * batchSize: int * ?shuffle: bool * ?dropLast: bool * ?device: Device * ?dtype: Dtype * ?backend: Backend * ?targetDevice: Device * ?targetDtype: Dtype * ?targetBackend: Backend -> DataLoader + member batch: ?batchSize: int -> Tensor * Tensor + member epoch: ?numBatches: int -> seq<int * Tensor * Tensor> + member length: int

--------------------
new: dataset: Dataset * batchSize: int * ?shuffle: bool * ?dropLast: bool * ?device: Device * ?dtype: Dtype * ?backend: Backend * ?targetDevice: Device * ?targetDtype: Dtype * ?targetBackend: Backend -> DataLoader
+
val batch: Tensor
+
val labels: Tensor
+
member DataLoader.batch: ?batchSize: int -> Tensor * Tensor
+
val epoch: int
+
val i: int
+
member DataLoader.epoch: ?numBatches: int -> seq<int * Tensor * Tensor>
+
namespace Furnace.Model
+
module Compose + +from Furnace
+
Multiple items
type Generator = + inherit Model + new: nz: int -> Generator + override forward: x: Tensor -> Tensor

--------------------
new: nz: int -> Generator
+
val nz: int
+
Multiple items
val int: value: 'T -> int (requires member op_Explicit)
<summary>Converts the argument to signed 32-bit integer. This is a direct conversion for all + primitive numeric types. For strings, the input is converted using <c>Int32.Parse()</c> + with InvariantCulture settings. Otherwise the operation requires an appropriate + static conversion method on the input type.</summary>
<param name="value">The input value.</param>
<returns>The converted int</returns>
<example id="int-example"><code lang="fsharp"></code></example>


--------------------
[<Struct>] +type int = int32
<summary>An abbreviation for the CLI type <see cref="T:System.Int32" />.</summary>
<category>Basic Types</category>


--------------------
type int<'Measure> = + int
<summary>The type of 32-bit signed integer numbers, annotated with a unit of measure. The unit + of measure is erased in compiled code and when values of this type + are analyzed using reflection. The type is representationally equivalent to + <see cref="T:System.Int32" />.</summary>
<category>Basic Types with Units of Measure</category>
+
Multiple items
namespace Furnace.Model

--------------------
type Model = Model<Tensor,Tensor>

--------------------
new: ?f: ('In -> 'Out) * ?parameters: seq<Parameter> * ?buffers: seq<Parameter> * ?models: seq<ModelBase> -> Model<'In,'Out>
+
val fc1: Linear
+
Multiple items
type Linear = + inherit Model + new: inFeatures: int * outFeatures: int * ?bias: bool -> Linear + override ToString: unit -> string + override forward: value: Tensor -> Tensor + member bias: Tensor + member weight: Tensor
<summary>A model that applies a linear transformation to the incoming data: \(y = xA^T + b\)</summary>

--------------------
new: inFeatures: int * outFeatures: int * ?bias: bool -> Linear
+
val fc2: Linear
+
val fc3: Linear
+
val fc4: Linear
+
val self: Generator
+
val x: Tensor
+
static member FurnaceImage.view: shape: seq<int> -> (Tensor -> Tensor)
static member FurnaceImage.view: shape: int -> (Tensor -> Tensor)
static member FurnaceImage.view: input: Tensor * shape: int -> Tensor
static member FurnaceImage.view: input: Tensor * shape: seq<int> -> Tensor
+
override Linear.forward: value: Tensor -> Tensor
+
static member FurnaceImage.leakyRelu: ?negativeSlope: float -> (Tensor -> Tensor)
static member FurnaceImage.leakyRelu: input: Tensor * ?negativeSlope: float -> Tensor
+
static member FurnaceImage.tanh: input: Tensor -> Tensor
+
Multiple items
type Discriminator = + inherit Model + new: nz: int -> Discriminator + override forward: x: Tensor -> Tensor

--------------------
new: nz: int -> Discriminator
+
val self: Discriminator
+
static member FurnaceImage.dropout: ?p: double -> (Tensor -> Tensor)
static member FurnaceImage.dropout: input: Tensor * ?p: double -> Tensor
+
static member FurnaceImage.sigmoid: input: Tensor -> Tensor
+
val gen: Generator
+
val dis: Discriminator
+
val print: x: 'a -> unit
<summary> + Print the given value to the console using the '%A' printf format specifier +</summary>
+
val generator: Model<Tensor,Tensor>
+
val discriminator: Model<Tensor,Tensor>
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/quickstart.ipynb b/quickstart.ipynb new file mode 100644 index 00000000..bf2697bc --- /dev/null +++ b/quickstart.ipynb @@ -0,0 +1,325 @@ + + { + "cells": [ + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n", +"// !bash \u003c(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Import Furnace package\n", +"#r \"nuget: Furnace-lite,1.0.8\"\n", +"#r \"nuget: SixLabors.ImageSharp,1.0.1\"\n", +"\n", +"// Set dotnet interactive formatter to plaintext\n", +"Formatter.SetPreferredMimeTypesFor(typeof\u003cobj\u003e, \"text/plain\")\n", +"Formatter.Register(fun (x:obj) (writer: TextWriter) -\u003e fprintfn writer \"%120A\" x )\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/quickstart.ipynb)\u0026emsp;\n", +"[![Script](img/badge-script.svg)](quickstart.fsx)\u0026emsp;\n", +"[![Script](img/badge-notebook.svg)](quickstart.ipynb)\n", +"\n", +"# Quickstart\n", +"\n", +"Here we cover some key tasks involved in a typical machine learning pipeline and how these can be implemented with Furnace. 
Note that a significant part of Furnace\u0027s design has been influenced by [PyTorch](https://pytorch.org/) and you would feel mostly at home if you have familiarity with PyTorch.\n", +"\n", +"## Datasets and Data Loaders\n", +"\n", +"Furnace provides the [Dataset](https://fsprojects.github.io/Furnace/reference/furnace-data-dataset.html) type that represents a data source and the [DataLoader](https://fsprojects.github.io/Furnace/reference/furnace-data-dataloader.html) type that handles the loading of data from datasets and iterating over [minibatches](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Iterative_method) of data.\n", +"\n", +"See the [Furnace.Data](/Furnace/reference/furnace-data.html) namespace for the full API reference.\n", +"\n", +"### Datasets\n", +"\n", +"Furnace has ready-to-use types that cover main datasets typically used in machine learning, such as [MNIST](https://fsprojects.github.io/Furnace/reference/furnace-data-mnist.html), [CIFAR10](https://fsprojects.github.io/Furnace/reference/furnace-data-cifar10.html), [CIFAR100](https://fsprojects.github.io/Furnace/reference/furnace-data-cifar100.html), and also more generic dataset types such as [TensorDataset](https://fsprojects.github.io/Furnace/reference/furnace-data-tensordataset.html) or [ImageDataset](https://fsprojects.github.io/Furnace/reference/furnace-data-imagedataset.html).\n", +"\n", +"The following loads the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset and shows one image entry and the corresponding label.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 2, "outputs": [ + { + "data": { + "text/plain": ["No value returned by any evaluator"] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["open Furnace\n", +"open Furnace.Data\n", +"\n", +"// First ten images in MNIST training set\n", +"let dataset = MNIST(\"../data\", train=true, transform=id, n=10)\n", +"\n", +"// Inspect a single 
image and label\n", +"let data, label = dataset[7]\n", +"\n", +"// Save image to file\n", +"data.saveImage(\"test.png\")\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 4, "outputs": [ + { + "data": { + "text/plain": ["Data: \" ", +" ", +" ", +" ", +" ", +" ~-}@#####Z ", +" -j*W########J\u0027 ", +" O############i ", +" [##Mxxxxo####i ", +" ::^ \u0027W##Z ", +" |\u0026##f ", +" (o###\u0027 ", +" (q%###d. ", +" \"uaaa####8}: ", +" _m########O ", +" _*####@####? ", +" \"v\u003c____f##? ", +" `##? ", +" |##? ", +" ?. 1\u0026##? ", +" iQ#: `)8##\u0026! ", +" p##txxxxb###o\\ ", +" p#########MC. ", +" +J#####wdt_ ", +" }B#Z}^ ", +" ", +" ", +" ", +"\"", +"Label: tensor(3,dtype=Int32,device=cpu-1)"] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["// Inspect data as ASCII and show label\n", +"printfn \"Data: %A\\nLabel: %A\" (data.toImageString()) label\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["### Data Loaders\n", +"\n", +"A data loader handles tasks such as constructing minibatches from an underlying dataset on-the-fly, shuffling the data, and moving the data tensors between devices. In the example below we show a single batch of six MNIST images and their corresponding classification labels.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 5, "outputs": [ + { + "data": { + "text/plain": [""] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["let loader = DataLoader(dataset, shuffle=true, batchSize=6)\n", +"let batch, labels = loader.batch()\n", +"\n", +"printfn \"%A\\nLabels: %A\" (batch.toImageString()) labels\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["In practice a data loader is typically used to iterate over all minibatches in a given dataset in order to feed each minibatch through a machine learning model. 
One full iteration over the dataset would be called an \"epoch\". Typically you would perform multiple such epochs of iterations during the training of a model.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 6, "outputs": [], + "source": ["for epoch = 1 to 10 do\n", +" for i, data, labels in loader.epoch() do\n", +" printfn \"Epoch %A, minibatch %A\" epoch (i+1)\n", +" // Process the minibatch\n", +" // ...\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["## Models\n", +"\n", +"Many machine learning models are differentiable functions whose parameters can be tuned via [gradient-based optimization](https://en.wikipedia.org/wiki/Gradient_descent), finding an optimum for an objective function that quantifies the fit of the model to a given set of data. These models are typically built as compositions non-linear functions and ready-to-use building blocks such as linear, recurrent, and convolutional layers.\n", +"\n", +"Furnace provides the most commonly used model building blocks including convolutions, transposed convolutions, batch normalization, dropout, recurrent and other architectures.\n", +"\n", +"See the [Furnace.Model](/Furnace/reference/furnace-model.html) namespace for the full API reference.\n", +"\n", +"### Constructing models, PyTorch style\n", +"\n", +"If you have experience with [PyTorch](https://pytorch.org/), you would find the following way of model definition familiar. 
Let\u0027s look at an example of a [generative adversarial network (GAN)](https://arxiv.org/abs/1406.2661) architecture.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 7, "outputs": [ + { + "data": { + "text/plain": [""] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["open Furnace.Model\n", +"open Furnace.Compose\n", +"\n", +"// PyTorch style\n", +"\n", +"// Define a model class inheriting the base\n", +"type Generator(nz: int) =\n", +" inherit Model()\n", +" let fc1 = Linear(nz, 256)\n", +" let fc2 = Linear(256, 512)\n", +" let fc3 = Linear(512, 1024)\n", +" let fc4 = Linear(1024, 28*28)\n", +" do base.addModel(fc1, fc2, fc3, fc4)\n", +" override self.forward(x) =\n", +" x\n", +" |\u003e FurnaceImage.view([-1;nz])\n", +" |\u003e fc1.forward\n", +" |\u003e FurnaceImage.leakyRelu(0.2)\n", +" |\u003e fc2.forward\n", +" |\u003e FurnaceImage.leakyRelu(0.2)\n", +" |\u003e fc3.forward\n", +" |\u003e FurnaceImage.leakyRelu(0.2)\n", +" |\u003e fc4.forward\n", +" |\u003e FurnaceImage.tanh\n", +"\n", +"// Define a model class inheriting the base\n", +"type Discriminator(nz:int) =\n", +" inherit Model()\n", +" let fc1 = Linear(28*28, 1024)\n", +" let fc2 = Linear(1024, 512)\n", +" let fc3 = Linear(512, 256)\n", +" let fc4 = Linear(256, 1)\n", +" do base.addModel(fc1, fc2, fc3, fc4)\n", +" override self.forward(x) =\n", +" x\n", +" |\u003e FurnaceImage.view([-1;28*28])\n", +" |\u003e fc1.forward\n", +" |\u003e FurnaceImage.leakyRelu(0.2)\n", +" |\u003e FurnaceImage.dropout(0.3)\n", +" |\u003e fc2.forward\n", +" |\u003e FurnaceImage.leakyRelu(0.2)\n", +" |\u003e FurnaceImage.dropout(0.3)\n", +" |\u003e fc3.forward\n", +" |\u003e FurnaceImage.leakyRelu(0.2)\n", +" |\u003e FurnaceImage.dropout(0.3)\n", +" |\u003e fc4.forward\n", +" |\u003e FurnaceImage.sigmoid\n", +"\n", +"// Instantiate the defined classes\n", +"let nz = 128\n", +"let gen = Generator(nz)\n", +"let dis = 
Discriminator(nz)\n", +"\n", +"print gen\n", +"print dis\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["### Constructing models, Furnace style\n", +"\n", +"A key advantage of Furnace lies in the [functional programming](https://en.wikipedia.org/wiki/Functional_programming) paradigm enabled by the F# language, where functions are first-class citizens, many algorithms can be constructed by applying and composing functions, and differentiation operations can be expressed as composable [higher-order functions](https://en.wikipedia.org/wiki/Higher-order_function). This allows very succinct (and beautiful) machine learning code to be expressed as a powerful combination of [lambda calculus](https://en.wikipedia.org/wiki/Lambda_calculus) and [differential calculus](https://en.wikipedia.org/wiki/Differential_calculus).\n", +"\n", +"For example, the following constructs the same GAN architecture (that we constructed in PyTorch style in the previous section) using Furnace\u0027s `--\u003e` composition operator, which allows you to seamlessly compose `Model` instances and differentiable `Tensor-\u003eTensor` functions.\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 8, "outputs": [ + { + "data": { + "text/plain": [""] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + }], + "source": ["// Furnace style\n", +"\n", +"// Model as a composition of models and Tensor-\u003eTensor functions\n", +"let generator =\n", +" FurnaceImage.view([-1;nz])\n", +" --\u003e Linear(nz, 256)\n", +" --\u003e FurnaceImage.leakyRelu(0.2)\n", +" --\u003e Linear(256, 512)\n", +" --\u003e FurnaceImage.leakyRelu(0.2)\n", +" --\u003e Linear(512, 1024)\n", +" --\u003e FurnaceImage.leakyRelu(0.2)\n", +" --\u003e Linear(1024, 28*28)\n", +" --\u003e FurnaceImage.tanh\n", +"\n", +"// Model as a composition of models and Tensor-\u003eTensor functions\n", +"let discriminator =\n", +" FurnaceImage.view([-1; 28*28])\n", 
+" --\u003e Linear(28*28, 1024)\n", +" --\u003e FurnaceImage.leakyRelu(0.2)\n", +" --\u003e FurnaceImage.dropout(0.3)\n", +" --\u003e Linear(1024, 512)\n", +" --\u003e FurnaceImage.leakyRelu(0.2)\n", +" --\u003e FurnaceImage.dropout(0.3)\n", +" --\u003e Linear(512, 256)\n", +" --\u003e FurnaceImage.leakyRelu(0.2)\n", +" --\u003e FurnaceImage.dropout(0.3)\n", +" --\u003e Linear(256, 1)\n", +" --\u003e FurnaceImage.sigmoid\n", +"\n", +"print generator\n", +"print discriminator\n"] + }], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/reference/furnace-backend.html b/reference/furnace-backend.html new file mode 100644 index 00000000..3000c340 --- /dev/null +++ b/reference/furnace-backend.html @@ -0,0 +1,416 @@ + + + + + Backend (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Backend Type +

+ +
+
+

+ + Represents a backend for Furnace tensors + +

+
+
+
+
+
+
+

+ Union cases +

+ + + + + + + + + + + + + + + + + + + + + +
+ Union case + + Description +
+
+ +

+ + + Other(name, code) + + +

+
+
+
+ Full Usage: + Other(name, code) +
+
+ Parameters: +
    + + + name + + : + string + +
    + + + code + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ + Reserved for future use + +

+
+
+
+ + name + + : + string +
+
+
+ + code + + : + int +
+
+
+
+
+ +

+ + + Reference + + +

+
+
+
+ Full Usage: + Reference +
+
+
+
+
+
+
+ + + + +

+ + The reference backend + +

+
+
+
+ +

+ + + Torch + + +

+
+
+
+ Full Usage: + Torch +
+
+
+
+
+
+
+ + + + +

+ + The LibTorch backend + +

+
+
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.Name + + +

+
+
+
+ Full Usage: + this.Name +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Get the name of the backend + +

+
+
+
+ + Returns: + + string +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-backendfunctionality-1.html b/reference/furnace-backendfunctionality-1.html new file mode 100644 index 00000000..67913376 --- /dev/null +++ b/reference/furnace-backendfunctionality-1.html @@ -0,0 +1,388 @@ + + + + + BackendFunctionality<'T> (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ BackendFunctionality<'T> Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + BackendFunctionality() + + +

+
+
+
+ Full Usage: + BackendFunctionality() +
+
+ + Returns: + BackendFunctionality<'T> + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + BackendFunctionality<'T> +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.Backends + + +

+
+
+
+ Full Usage: + this.Backends +
+
+ + Returns: + ConcurrentDictionary<int, 'T> + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + ConcurrentDictionary<int, 'T> +
+
+
+
+
+ +

+ + + this.Get + + +

+
+
+
+ Full Usage: + this.Get +
+
+ Parameters: +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + 'T + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + 'T +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-backendmodule.html b/reference/furnace-backendmodule.html new file mode 100644 index 00000000..03a29fd1 --- /dev/null +++ b/reference/furnace-backendmodule.html @@ -0,0 +1,316 @@ + + + + + Backend (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Backend Module +

+ +
+
+

+ + Contains functions and settings related to backend specifications. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + Default + + +

+
+
+
+ Full Usage: + Default +
+
+ + Returns: + Backend + +
+
+
+
+
+
+ + + + + + +

+ + Get or set the default backend used when creating tensors. Note, use FurnaceImage.config(...) instead. + +

+
+
+
+ + Returns: + + Backend +
+
+
+
+
+ +

+ + + Register name + + +

+
+
+
+ Full Usage: + Register name +
+
+ Parameters: +
    + + + name + + : + string + +
    +
+
+ + Returns: + Backend + +
+
+
+
+
+
+ + + + + + +

+ + Register a new backend + +

+
+
+
+ + name + + : + string +
+
+
+
+
+ + Returns: + + Backend +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-backends-backendtensorstatics.html b/reference/furnace-backends-backendtensorstatics.html new file mode 100644 index 00000000..2c435581 --- /dev/null +++ b/reference/furnace-backends-backendtensorstatics.html @@ -0,0 +1,1839 @@ + + + + + BackendTensorStatics (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ BackendTensorStatics Type +

+ +
+
+

+ + Represents the static functionality for tensors implemented by a Furnace backend. + +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + BackendTensorStatics() + + +

+
+
+
+ Full Usage: + BackendTensorStatics() +
+
+ + Returns: + BackendTensorStatics + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + BackendTensorStatics +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.CreateFromFlatArray + + +

+
+
+
+ Full Usage: + this.CreateFromFlatArray +
+
+ Parameters: +
    + + + data + + : + Array + +
    + + + shape + + : + Shape + +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Create a tensor of appropriate dtype from a scalar or array of appropriate values. + A backend type is delivered consistent with in-memory data - a type for dtype Int32 gets int32 data etc. + +

+
+
+
+ + data + + : + Array +
+
+
+ + shape + + : + Shape +
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Empty + + +

+
+
+
+ Full Usage: + this.Empty +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with arbitrary values for the given shape and device + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Full + + +

+
+
+
+ Full Usage: + this.Full +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + value + + : + scalar + +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with the given value for the given shape and device + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + value + + : + scalar +
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.GetDevices + + +

+
+
+
+ Full Usage: + this.GetDevices +
+
+ Parameters: + +
+ + Returns: + Device list + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the devices supported by this backend + +

+
+
+
+ + ?deviceType + + : + DeviceType +
+
+
+
+
+ + Returns: + + Device list +
+
+
+
+
+ +

+ + + this.IsDeviceTypeAvailable + + +

+
+
+
+ Full Usage: + this.IsDeviceTypeAvailable +
+
+ Parameters: + +
+ + Returns: + bool + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Indicates if a device type is supported by this backend + +

+
+
+
+ + deviceType + + : + DeviceType +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.One + + +

+
+
+
+ Full Usage: + this.One +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the scalar 1 tensor for the given device + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Ones + + +

+
+
+
+ Full Usage: + this.Ones +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with ones for the given shape and device + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Random + + +

+
+
+
+ Full Usage: + this.Random +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random values for the given shape and device + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.RandomInt + + +

+
+
+
+ Full Usage: + this.RandomInt +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + low + + : + int + +
    + + + high + + : + int + +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random integers from the given range for the given shape and device + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + low + + : + int +
+
+
+ + high + + : + int +
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.RandomNormal + + +

+
+
+
+ Full Usage: + this.RandomNormal +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random values from the normal distribution for the given shape and device + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Seed + + +

+
+
+
+ Full Usage: + this.Seed +
+
+ Parameters: +
    + + + seed + + : + int + +
    +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Sets the seed for the default random number generator of the backend + +

+
+
+
+ + seed + + : + int +
+
+
+
+
+ +

+ + + this.Zero + + +

+
+
+
+ Full Usage: + this.Zero +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the scalar 0 tensor for the given device + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Zeros + + +

+
+
+
+ Full Usage: + this.Zeros +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dtype + + : + Dtype + +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with zeros for the given shape and device + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dtype + + : + Dtype +
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + BackendTensorStatics.Get(?backend) + + +

+
+
+
+ Full Usage: + BackendTensorStatics.Get(?backend) +
+
+ Parameters: +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + BackendTensorStatics + +
+
+
+
+
+
+ + + + + + +

+ + Get the backend implementation for the given tensor element type and backend. + +

+
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + BackendTensorStatics +
+
+
+
+
+ +

+ + + BackendTensorStatics.Seed(?seed) + + +

+
+
+
+ Full Usage: + BackendTensorStatics.Seed(?seed) +
+
+ Parameters: +
    + + + ?seed + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Seed all backends with the given random seed, or a new seed based on the current time + if no seed is specified. + +

+
+
+
+ + ?seed + + : + int +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-backends-rawtensor.html b/reference/furnace-backends-rawtensor.html new file mode 100644 index 00000000..ad965afd --- /dev/null +++ b/reference/furnace-backends-rawtensor.html @@ -0,0 +1,14519 @@ + + + + + RawTensor (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ RawTensor Type +

+ +
+
+

+ + Represents a raw (i.e. non-differentiable immutable) tensor implemented by a Furnace backend. + +

+
+

+ + Each backend will provide one or more .NET implementations of this type, which may in turn + wrap handles to native implementations. + +

+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + RawTensor() + + +

+
+
+
+ Full Usage: + RawTensor() +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.AbsInPlace + + +

+
+
+
+ Full Usage: + this.AbsInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise absolute value of the tensor + +

+
+
+
+ +

+ + + this.AbsT + + +

+
+
+
+ Full Usage: + this.AbsT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise absolute value of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AcosInPlace + + +

+
+
+
+ Full Usage: + this.AcosInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise acos of the tensor + +

+
+
+
+ +

+ + + this.AcosT + + +

+
+
+
+ Full Usage: + this.AcosT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise acos of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AddInPlace + + +

+
+
+
+ Full Usage: + this.AddInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise addition of the two tensors + +

+
+
+
+ + arg0 + + : + RawTensor +
+
+
+ + ?alpha + + : + scalar +
+
+
+
+
+ +

+ + + this.AddScalarInPlace + + +

+
+
+
+ Full Usage: + this.AddScalarInPlace +
+
+ Parameters: +
    + + + b + + : + scalar + +
    +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise addition of a scalar + +

+
+
+
+ + b + + : + scalar +
+
+
+
+
+ +

+ + + this.AddSliceInPlace + + +

+
+
+
+ Full Usage: + this.AddSliceInPlace +
+
+ Parameters: +
    + + + location + + : + int[] + +
    + + + t2 + + : + RawTensor + +
    +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Adds a slice of t2 at the given location to the tensor + +

+
+
+
+ + location + + : + int[] +
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.AddTT + + +

+
+
+
+ Full Usage: + this.AddTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise addition of the two tensors + +

+
+
+
+ + arg0 + + : + RawTensor +
+
+
+ + ?alpha + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AddTT0 + + +

+
+
+
+ Full Usage: + this.AddTT0 +
+
+ Parameters: +
    + + + b + + : + scalar + +
    + + + ?alpha + + : + scalar + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise addition of a tensor and a scalar + +

+
+
+
+ + b + + : + scalar +
+
+
+ + ?alpha + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AddTTSlice + + +

+
+
+
+ Full Usage: + this.AddTTSlice +
+
+ Parameters: +
    + + + location + + : + int[] + +
    + + + t2 + + : + RawTensor + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Adds a slice of t2 at the given location to the tensor + +

+
+
+
+ + location + + : + int[] +
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AllClose + + +

+
+
+
+ Full Usage: + this.AllClose +
+
+ Parameters: +
    + + + t2 + + : + RawTensor + +
    + + + relativeTolerance + + : + float + +
    + + + absoluteTolerance + + : + float + +
    +
+
+ + Returns: + bool + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Indicates if the two tensors have the same shape and element type, and all corresponding values + are equal up to the given tolerances. + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+ + relativeTolerance + + : + float +
+
+
+ + absoluteTolerance + + : + float +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.AsinInPlace + + +

+
+
+
+ Full Usage: + this.AsinInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise asin of the tensor + +

+
+
+
+ +

+ + + this.AsinT + + +

+
+
+
+ Full Usage: + this.AsinT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise asin of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AtanInPlace + + +

+
+
+
+ Full Usage: + this.AtanInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise atan of the tensor + +

+
+
+
+ +

+ + + this.AtanT + + +

+
+
+
+ Full Usage: + this.AtanT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise atan of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AvgPool1D + + +

+
+
+
+ Full Usage: + this.AvgPool1D +
+
+ Parameters: +
    + + + kernelSize + + : + int + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 1D avgpool of a tensor + +

+
+
+
+ + kernelSize + + : + int +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AvgPool2D + + +

+
+
+
+ Full Usage: + this.AvgPool2D +
+
+ Parameters: +
    + + + kernelSize + + : + int[] + +
    + + + stride + + : + int[] + +
    + + + padding + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 2D avgpool of a tensor + +

+
+
+
+ + kernelSize + + : + int[] +
+
+
+ + stride + + : + int[] +
+
+
+ + padding + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AvgPool3D + + +

+
+
+
+ Full Usage: + this.AvgPool3D +
+
+ Parameters: +
    + + + kernelSize + + : + int[] + +
    + + + stride + + : + int[] + +
    + + + padding + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 3D avgpool of a tensor + +

+
+
+
+ + kernelSize + + : + int[] +
+
+
+ + stride + + : + int[] +
+
+
+ + padding + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AvgPoolReverse1D + + +

+
+
+
+ Full Usage: + this.AvgPoolReverse1D +
+
+ Parameters: +
    + + + originalInput + + : + RawTensor + +
    + + + kernelSize + + : + int + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Returns the reverse mode of a 1D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input +

+
+

+ The originalInput parameter is only used for shape information +

+
+
+ + originalInput + + : + RawTensor +
+
+
+ + kernelSize + + : + int +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AvgPoolReverse2D + + +

+
+
+
+ Full Usage: + this.AvgPoolReverse2D +
+
+ Parameters: +
    + + + originalInput + + : + RawTensor + +
    + + + kernelSize + + : + int[] + +
    + + + stride + + : + int[] + +
    + + + padding + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Returns the reverse mode of a 2D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input +

+
+

+ The originalInput parameter is only used for shape information +

+
+
+ + originalInput + + : + RawTensor +
+
+
+ + kernelSize + + : + int[] +
+
+
+ + stride + + : + int[] +
+
+
+ + padding + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.AvgPoolReverse3D + + +

+
+
+
+ Full Usage: + this.AvgPoolReverse3D +
+
+ Parameters: +
    + + + originalInput + + : + RawTensor + +
    + + + kernelSize + + : + int[] + +
    + + + stride + + : + int[] + +
    + + + padding + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Returns the reverse mode of a 3D avgpool of a tensor, apportioning each part of the adjoint equally to each corresponding input +

+
+

+ The originalInput parameter is only used for shape information +

+
+
+ + originalInput + + : + RawTensor +
+
+
+ + kernelSize + + : + int[] +
+
+
+ + stride + + : + int[] +
+
+
+ + padding + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.BMMTT + + +

+
+
+
+ Full Usage: + this.BMMTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the batched matrix multiplication of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Backend + + +

+
+
+
+ Full Usage: + this.Backend +
+
+ + Returns: + Backend + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the backend for the tensor + +

+
+
+
+ + Returns: + + Backend +
+
+
+
+
+ +

+ + + this.Cast + + +

+
+
+
+ Full Usage: + this.Cast +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor where the elements have each been cast to the given tensor element storage type. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.CatTs + + +

+
+
+
+ Full Usage: + this.CatTs +
+
+ Parameters: +
    + + + tensors + + : + RawTensor[] + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Concatenate the given tensors along the given dimension + +

+
+
+
+ + tensors + + : + RawTensor[] +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.CeilInPlace + + +

+
+
+
+ Full Usage: + this.CeilInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise integer ceiling of the tensor + +

+
+
+
+ +

+ + + this.CeilT + + +

+
+
+
+ Full Usage: + this.CeilT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise integer ceiling of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.ClampInPlace + + +

+
+
+
+ Full Usage: + this.ClampInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by with values constrained by the corresponding elements in the low/high tensors. + +

+
+
+
+ + low + + : + RawTensor +
+
+
+ + high + + : + RawTensor +
+
+
+
+
+ +

+ + + this.ClampT + + +

+
+
+
+ Full Usage: + this.ClampT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor with values constrained by the corresponding elements in the low/high tensors. + +

+
+
+
+ + low + + : + RawTensor +
+
+
+ + high + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Clone + + +

+
+
+
+ Full Usage: + this.Clone +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Clone the underlying storage of the tensor. + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.ComputeHash + + +

+
+
+
+ Full Usage: + this.ComputeHash +
+
+ + Returns: + int + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a hash of the contents of the tensor. This operation may cause the + tensor to be moved to the CPU, and its entire contents iterated. + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.Conv1D + + +

+
+
+
+ Full Usage: + this.Conv1D +
+
+ Parameters: +
    + + + kernel + + : + RawTensor + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 1D convolution of the tensor + +

+
+
+
+ + kernel + + : + RawTensor +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Conv2D + + +

+
+
+
+ Full Usage: + this.Conv2D +
+
+ Parameters: +
    + + + kernel + + : + RawTensor + +
    + + + strides + + : + int[] + +
    + + + padding + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 2D convolution of the tensor + +

+
+
+
+ + kernel + + : + RawTensor +
+
+
+ + strides + + : + int[] +
+
+
+ + padding + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Conv3D + + +

+
+
+
+ Full Usage: + this.Conv3D +
+
+ Parameters: +
    + + + kernel + + : + RawTensor + +
    + + + strides + + : + int[] + +
    + + + padding + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 3D convolution of the tensor + +

+
+
+
+ + kernel + + : + RawTensor +
+
+
+ + strides + + : + int[] +
+
+
+ + padding + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.CosInPlace + + +

+
+
+
+ Full Usage: + this.CosInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise cosine of the tensor + +

+
+
+
+ +

+ + + this.CosT + + +

+
+
+
+ Full Usage: + this.CosT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise cosine of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.CoshInPlace + + +

+
+
+
+ Full Usage: + this.CoshInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise cosh of the tensor + +

+
+
+
+ +

+ + + this.CoshT + + +

+
+
+
+ Full Usage: + this.CoshT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise cosh of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.CreateLike + + +

+
+
+
+ Full Usage: + this.CreateLike +
+
+ Parameters: +
    + + + values + + : + obj + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with values drawn from the given .NET object for the + given configuration settings, defaulting to the configuration settings of the object tensor. + +

+
+
+
+ + values + + : + obj +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.DetT + + +

+
+
+
+ Full Usage: + this.DetT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the determinant of a square matrix + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Device + + +

+
+
+
+ Full Usage: + this.Device +
+
+ + Returns: + Device + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the device for the tensor + +

+
+
+
+ + Returns: + + Device +
+
+
+
+
+ +

+ + + this.DeviceType + + +

+
+
+
+ Full Usage: + this.DeviceType +
+
+ + Returns: + DeviceType + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the device type for the tensor + +

+
+
+
+ + Returns: + + DeviceType +
+
+
+
+
+ +

+ + + this.DilateT + + +

+
+
+
+ Full Usage: + this.DilateT +
+
+ Parameters: +
    + + + dilations + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the dilation of the tensor using the given dilations parameters + +

+
+
+
+ + dilations + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Dim + + +

+
+
+
+ Full Usage: + this.Dim +
+
+ + Returns: + int + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the dimensionality of the tensor + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.DivFromT0T + + +

+
+
+
+ Full Usage: + this.DivFromT0T +
+
+ Parameters: +
    + + + t1 + + : + scalar + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise division of a scalar by a tensor, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t1 + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.DivInPlace + + +

+
+
+
+ Full Usage: + this.DivInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise division of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.DivScalarInPlace + + +

+
+
+
+ Full Usage: + this.DivScalarInPlace +
+
+ Parameters: +
    + + + t2 + + : + scalar + +
    +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise division of a tensor by a scalar, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t2 + + : + scalar +
+
+
+
+
+ +

+ + + this.DivTT + + +

+
+
+
+ Full Usage: + this.DivTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise division of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.DivTT0 + + +

+
+
+
+ Full Usage: + this.DivTT0 +
+
+ Parameters: +
    + + + t2 + + : + scalar + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise division of a tensor by a scalar, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t2 + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Dtype + + +

+
+
+
+ Full Usage: + this.Dtype +
+
+ + Returns: + Dtype + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the element storage type for the tensor + +

+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+ +

+ + + this.EmptyLike + + +

+
+
+
+ Full Usage: + this.EmptyLike +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with arbitrary values for the given shape and configuration settings, + defaulting to the configuration settings of the object tensor + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.EqInPlace + + +

+
+
+
+ Full Usage: + this.EqInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.EqTT + + +

+
+
+
+ Full Usage: + this.EqTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a boolean tensor comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Equals + + +

+
+
+
+ Full Usage: + this.Equals +
+
+ Parameters: + +
+ + Returns: + bool + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Compare two tensors for equality + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.ExpInPlace + + +

+
+
+
+ Full Usage: + this.ExpInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise natural exponentiation of the tensor + +

+
+
+
+ +

+ + + this.ExpT + + +

+
+
+
+ Full Usage: + this.ExpT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise natural exponentiation of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Expand + + +

+
+
+
+ Full Usage: + this.Expand +
+
+ Parameters: +
    + + + newShape + + : + Shape + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Expand the shape of the tensor. + +

+
+
+
+ + newShape + + : + Shape +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.FlipT + + +

+
+
+
+ Full Usage: + this.FlipT +
+
+ Parameters: +
    + + + dims + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the flip of the tensor along the given dimensions + +

+
+
+
+ + dims + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.FloorInPlace + + +

+
+
+
+ Full Usage: + this.FloorInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise integer floor of the tensor + +

+
+
+
+ +

+ + + this.FloorT + + +

+
+
+
+ Full Usage: + this.FloorT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise integer floor of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.FullLike + + +

+
+
+
+ Full Usage: + this.FullLike +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + value + + : + scalar + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with the given scalar value for the given shape and configuration settings, + defaulting to the configuration settings of the object tensor + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + value + + : + scalar +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.GatherT + + +

+
+
+
+ Full Usage: + this.GatherT +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + indices + + : + RawTensor + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor selecting the given indices from the given dimension and stacking those in the order specified. + +

+
+
+
+ + dim + + : + int +
+
+
+ + indices + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.GeInPlace + + +

+
+
+
+ Full Usage: + this.GeInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.GeTT + + +

+
+
+
+ Full Usage: + this.GeTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a boolean tensor comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.GetItem + + +

+
+
+
+ Full Usage: + this.GetItem +
+
+ Parameters: +
    + + + indexes + + : + int[] + +
    +
+
+ + Returns: + scalar + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a .NET object representing the value of the tensor at the given indexes + +

+
+
+
+ + indexes + + : + int[] +
+
+
+
+
+ + Returns: + + scalar +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + fullBounds + + : + int[,] + - + + The indexes are an Nx3 array. The first row is the start bounds, the second row is + the end bounds, the third is 1/0 indicating dimension removal. + + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Get a slice of the given tensor. +

+
+
+
+ + fullBounds + + : + int[,] +
+
+

+ + The indexes are an Nx3 array. The first row is the start bounds, the second row is + the end bounds, the third is 1/0 indicating dimension removal. + +

+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.GtInPlace + + +

+
+
+
+ Full Usage: + this.GtInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.GtTT + + +

+
+
+
+ Full Usage: + this.GtTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a boolean tensor comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Handle + + +

+
+
+
+ Full Usage: + this.Handle +
+
+ + Returns: + obj + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a handle to the underlying representation of the the tensor. For example, if the Torch + backend is used this will be the corresponding TorchSharp TorchTensor. + +

+
+
+
+ + Returns: + + obj +
+
+
+
+
+ +

+ + + this.InverseT + + +

+
+
+
+ Full Usage: + this.InverseT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the inverse of a single square matrix (2d tensor) or a batch of square matrices (3d tensor) + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.IsInfT + + +

+
+
+
+ Full Usage: + this.IsInfT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a boolean tensor where each element indicates if the corresponding element in the tensor is an infinity value + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.IsMutable + + +

+
+
+
+ Full Usage: + this.IsMutable +
+
+ + Returns: + bool + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.IsNaNT + + +

+
+
+
+ Full Usage: + this.IsNaNT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a boolean tensor where each element indicates if the corresponding element in the tensor is a NaN value + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.LeInPlace + + +

+
+
+
+ Full Usage: + this.LeInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.LeTT + + +

+
+
+
+ Full Usage: + this.LeTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a boolean tensor comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Log10InPlace + + +

+
+
+
+ Full Usage: + this.Log10InPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise base10 logarithm of the tensor + +

+
+
+
+ +

+ + + this.Log10T + + +

+
+
+
+ Full Usage: + this.Log10T +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise base10 logarithm of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.LogInPlace + + +

+
+
+
+ Full Usage: + this.LogInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise natural logarithm of the tensor + +

+
+
+
+ +

+ + + this.LogT + + +

+
+
+
+ Full Usage: + this.LogT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise natural logarithm of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.LtInPlace + + +

+
+
+
+ Full Usage: + this.LtInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.LtTT + + +

+
+
+
+ Full Usage: + this.LtTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a boolean tensor comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.MatMulInPlace + + +

+
+
+
+ Full Usage: + this.MatMulInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the matrix multiplication of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.MatMulTT + + +

+
+
+
+ Full Usage: + this.MatMulTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the matrix multiplication of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.MaxIndexT + + +

+
+
+
+ Full Usage: + this.MaxIndexT +
+
+ + Returns: + int[] + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the index of a maximum value of the tensor + +

+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + this.MaxPool1D + + +

+
+
+
+ Full Usage: + this.MaxPool1D +
+
+ Parameters: +
    + + + kernelSize + + : + int + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    +
+
+ + Returns: + RawTensor * RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 1D maxpool of a tensor and its chosen maximum indices + +

+
+
+
+ + kernelSize + + : + int +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+
+
+ + Returns: + + RawTensor * RawTensor +
+
+
+
+
+ +

+ + + this.MaxPool2D + + +

+
+
+
+ Full Usage: + this.MaxPool2D +
+
+ Parameters: +
    + + + kernelSize + + : + int[] + +
    + + + strides + + : + int[] + +
    + + + padding + + : + int[] + +
    +
+
+ + Returns: + RawTensor * RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 2D maxpool of a tensor and its chosen maximum indices + +

+
+
+
+ + kernelSize + + : + int[] +
+
+
+ + strides + + : + int[] +
+
+
+ + padding + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor * RawTensor +
+
+
+
+
+ +

+ + + this.MaxPool3D + + +

+
+
+
+ Full Usage: + this.MaxPool3D +
+
+ Parameters: +
    + + + kernelSize + + : + int[] + +
    + + + strides + + : + int[] + +
    + + + padding + + : + int[] + +
    +
+
+ + Returns: + RawTensor * RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 3D maxpool of a tensor and its chosen maximum indices + +

+
+
+
+ + kernelSize + + : + int[] +
+
+
+ + strides + + : + int[] +
+
+
+ + padding + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor * RawTensor +
+
+
+
+
+ +

+ + + this.MaxReduceT + + +

+
+
+
+ Full Usage: + this.MaxReduceT +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + keepdim + + : + bool + +
    +
+
+ + Returns: + RawTensor * RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor containing values and indexes of a maximum value of the tensor reducing along the given dimension + +

+
+
+
+ + dim + + : + int +
+
+
+ + keepdim + + : + bool +
+
+
+
+
+ + Returns: + + RawTensor * RawTensor +
+
+
+
+
+ +

+ + + this.MaxUnpool1D + + +

+
+
+
+ Full Usage: + this.MaxUnpool1D +
+
+ Parameters: +
    + + + indices + + : + RawTensor + +
    + + + outputSize + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 1D maxunpool of a tensor using the given indices for locations of maximums + +

+
+
+
+ + indices + + : + RawTensor +
+
+
+ + outputSize + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.MaxUnpool2D + + +

+
+
+
+ Full Usage: + this.MaxUnpool2D +
+
+ Parameters: +
    + + + indices + + : + RawTensor + +
    + + + outputSize + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 2D maxunpool of a tensor using the given indices for locations of maximums + +

+
+
+
+ + indices + + : + RawTensor +
+
+
+ + outputSize + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.MaxUnpool3D + + +

+
+
+
+ Full Usage: + this.MaxUnpool3D +
+
+ Parameters: +
    + + + indices + + : + RawTensor + +
    + + + outputSize + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the 3D maxunpool of a tensor using the given indices for locations of maximums + +

+
+
+
+ + indices + + : + RawTensor +
+
+
+ + outputSize + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.MinIndexT + + +

+
+
+
+ Full Usage: + this.MinIndexT +
+
+ + Returns: + int[] + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the index of a minimum value of the tensor + +

+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + this.MinReduceT + + +

+
+
+
+ Full Usage: + this.MinReduceT +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + keepdim + + : + bool + +
    +
+
+ + Returns: + RawTensor * RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor containing values and indexes of a minimum value of the tensor reducing along the given dimension + +

+
+
+
+ + dim + + : + int +
+
+
+ + keepdim + + : + bool +
+
+
+
+
+ + Returns: + + RawTensor * RawTensor +
+
+
+
+
+ +

+ + + this.MoveTo + + +

+
+
+
+ Full Usage: + this.MoveTo +
+
+ Parameters: +
    + + + device + + : + Device + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor moved to the given device. + +

+
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.MulInPlace + + +

+
+
+
+ Full Usage: + this.MulInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise multiplication of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.MulScalarInPlace + + +

+
+
+
+ Full Usage: + this.MulScalarInPlace +
+
+ Parameters: +
    + + + b + + : + scalar + +
    +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise multiplication of a tensor and a scalar, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + b + + : + scalar +
+
+
+
+
+ +

+ + + this.MulTT + + +

+
+
+
+ Full Usage: + this.MulTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise multiplication of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.MulTT0 + + +

+
+
+
+ Full Usage: + this.MulTT0 +
+
+ Parameters: +
    + + + t2 + + : + scalar + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise multiplication of a tensor and a scalar, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t2 + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.NegInPlace + + +

+
+
+
+ Full Usage: + this.NegInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise negation of the tensor + +

+
+
+
+ +

+ + + this.NegT + + +

+
+
+
+ Full Usage: + this.NegT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise negation of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Nelement + + +

+
+
+
+ Full Usage: + this.Nelement +
+
+ + Returns: + int + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the number of elements in the tensor + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.NeqInPlace + + +

+
+
+
+ Full Usage: + this.NeqInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.NeqTT + + +

+
+
+
+ Full Usage: + this.NeqTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a boolean tensor comparing each element pairwise with the corresponding element in t2 + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.OneLike + + +

+
+
+
+ Full Usage: + this.OneLike +
+
+ Parameters: +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a scalar one tensor for the given configuration settings, defaulting to the configuration settings of the object tensor + +

+
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.OnesInPlace + + +

+
+
+
+ Full Usage: + this.OnesInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by setting all values to one + +

+
+
+
+ +

+ + + this.OnesLike + + +

+
+
+
+ Full Usage: + this.OnesLike +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with one values for the given shape and configuration settings, + defaulting to the configuration settings of the object tensor + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.PermuteT + + +

+
+
+
+ Full Usage: + this.PermuteT +
+
+ Parameters: +
    + + + permutation + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a view of the original tensor with its dimensions permuted + +

+
+
+
+ + permutation + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.PowFromT0T + + +

+
+
+
+ Full Usage: + this.PowFromT0T +
+
+ Parameters: +
    + + + t1 + + : + scalar + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise exponentiation of a scalar and a tensor, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t1 + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.PowInPlace + + +

+
+
+
+ Full Usage: + this.PowInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise exponentiation of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.PowScalarInPlace + + +

+
+
+
+ Full Usage: + this.PowScalarInPlace +
+
+ Parameters: +
    + + + t2 + + : + scalar + +
    +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise exponentiation of a tensor and a scalar, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t2 + + : + scalar +
+
+
+
+
+ +

+ + + this.PowTT + + +

+
+
+
+ Full Usage: + this.PowTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise exponentiation of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.PowTT0 + + +

+
+
+
+ Full Usage: + this.PowTT0 +
+
+ Parameters: +
    + + + t2 + + : + scalar + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise exponentiation of a tensor and a scalar, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t2 + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.Print + + +

+
+
+
+ Full Usage: + this.Print +
+
+ Parameters: +
    + + + ?postfix + + : + string + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?postfix + + : + string +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + this.RandomInPlace + + +

+
+
+
+ Full Usage: + this.RandomInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by setting it to random values taken from a uniform distribution in [0, 1). + +

+
+
+
+ +

+ + + this.RandomIntInPlace + + +

+
+
+
+ Full Usage: + this.RandomIntInPlace +
+
+ Parameters: +
    + + + low + + : + int + +
    + + + high + + : + int + +
    +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random integers from the given range + +

+
+
+
+ + low + + : + int +
+
+
+ + high + + : + int +
+
+
+
+
+ +

+ + + this.RandomIntLike + + +

+
+
+
+ Full Usage: + this.RandomIntLike +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + low + + : + int + +
    + + + high + + : + int + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random integer values from the given range for the given shape and configuration settings, + defaulting to the configuration settings of the object tensor + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + low + + : + int +
+
+
+ + high + + : + int +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.RandomLike + + +

+
+
+
+ Full Usage: + this.RandomLike +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random values for the given shape and configuration settings, + defaulting to the configuration settings of the object tensor + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.RandomNormalInPlace + + +

+
+
+
+ Full Usage: + this.RandomNormalInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by setting all values taken from a normal distribution with mean 0 and variance 1. + +

+
+
+
+ +

+ + + this.RandomNormalLike + + +

+
+
+
+ Full Usage: + this.RandomNormalLike +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random values from a normal distribution for the given shape and configuration settings, + defaulting to the configuration settings of the object tensor + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.ReluInPlace + + +

+
+
+
+ Full Usage: + this.ReluInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise ReLU of the tensor + +

+
+
+
+ +

+ + + this.ReluT + + +

+
+
+
+ Full Usage: + this.ReluT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise ReLU of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.RoundInPlace + + +

+
+
+
+ Full Usage: + this.RoundInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise rounding of the tensor + +

+
+
+
+ +

+ + + this.RoundT + + +

+
+
+
+ Full Usage: + this.RoundT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise rounding of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.ScatterT + + +

+
+
+
+ Full Usage: + this.ScatterT +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + indices + + : + RawTensor + +
    + + + destinationShape + + : + Shape + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor with given destination shape where values are copied from the current tensor to locations specified by the dimension and indices. + +

+
+
+
+ + dim + + : + int +
+
+
+ + indices + + : + RawTensor +
+
+
+ + destinationShape + + : + Shape +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SetMutable + + +

+
+
+
+ Full Usage: + this.SetMutable +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + A backdoor to switch this tensor to be usable as a mutable tensor. You should have a unique handle to + this tensor for the entire time it is being used as a mutable tensor. + +

+
+
+
+ +

+ + + this.Shape + + +

+
+
+
+ Full Usage: + this.Shape +
+
+ + Returns: + Shape + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets the shape of the tensor + +

+
+
+
+ + Returns: + + Shape +
+
+
+
+
+ +

+ + + this.SigmoidInPlace + + +

+
+
+
+ Full Usage: + this.SigmoidInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise sigmoid of the tensor + +

+
+
+
+ +

+ + + this.SigmoidT + + +

+
+
+
+ Full Usage: + this.SigmoidT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise sigmoid of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SignInPlace + + +

+
+
+
+ Full Usage: + this.SignInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise sign of the tensor + +

+
+
+
+ +

+ + + this.SignT + + +

+
+
+
+ Full Usage: + this.SignT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise sign of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SinInPlace + + +

+
+
+
+ Full Usage: + this.SinInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise sine of the tensor + +

+
+
+
+ +

+ + + this.SinT + + +

+
+
+
+ Full Usage: + this.SinT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise sine of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SinhInPlace + + +

+
+
+
+ Full Usage: + this.SinhInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise sinh of the tensor + +

+
+
+
+ +

+ + + this.SinhT + + +

+
+
+
+ Full Usage: + this.SinhT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise sinh of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SoftplusInPlace + + +

+
+
+
+ Full Usage: + this.SoftplusInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise softplus of the tensor + +

+
+
+
+ +

+ + + this.SoftplusT + + +

+
+
+
+ Full Usage: + this.SoftplusT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise softplus of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SolveTT + + +

+
+
+
+ Full Usage: + this.SolveTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the solution of single a square system of linear equations with a unique solution or a batch of several such systems + +

+
+
+
+ + arg0 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SplitT + + +

+
+
+
+ Full Usage: + this.SplitT +
+
+ Parameters: +
    + + + sizes + + : + int[] + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + RawTensor[] + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Split the given tensors along the given dimensions + +

+
+
+
+ + sizes + + : + int[] +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + RawTensor[] +
+
+
+
+
+ +

+ + + this.SqrtInPlace + + +

+
+
+
+ Full Usage: + this.SqrtInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise square root of the tensor + +

+
+
+
+ +

+ + + this.SqrtT + + +

+
+
+
+ Full Usage: + this.SqrtT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise square root of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SqueezeT + + +

+
+
+
+ Full Usage: + this.SqueezeT +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the tensor with the same values and the given dimension removed. The given dimension must be of size 1. + +

+
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.StackTs + + +

+
+
+
+ Full Usage: + this.StackTs +
+
+ Parameters: +
    + + + tensors + + : + RawTensor[] + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Stack the given tensors along the given dimension + +

+
+
+
+ + tensors + + : + RawTensor[] +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SubFromT0T + + +

+
+
+
+ Full Usage: + this.SubFromT0T +
+
+ Parameters: +
    + + + t1 + + : + scalar + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise subtraction of the scalar and a tensor, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t1 + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SubInPlace + + +

+
+
+
+ Full Usage: + this.SubInPlace +
+
+ Parameters: + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise subtraction of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ +

+ + + this.SubScalarInPlace + + +

+
+
+
+ Full Usage: + this.SubScalarInPlace +
+
+ Parameters: +
    + + + b + + : + scalar + +
    +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise subtraction of the tensor and a scalar, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + b + + : + scalar +
+
+
+
+
+ +

+ + + this.SubTT + + +

+
+
+
+ Full Usage: + this.SubTT +
+
+ Parameters: + +
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise subtraction of two tensors + +

+
+
+
+ + t2 + + : + RawTensor +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SubTT0 + + +

+
+
+
+ Full Usage: + this.SubTT0 +
+
+ Parameters: +
    + + + t2 + + : + scalar + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise subtraction of the tensor and a scalar, where the scalar is logically + broadcast to the same shape as the tensor + +

+
+
+
+ + t2 + + : + scalar +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SumT + + +

+
+
+
+ Full Usage: + this.SumT +
+
+ Parameters: +
    + + + ?resultType + + : + Dtype + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the scalar tensor for the summation of all elements in the tensor + +

+
+
+
+ + ?resultType + + : + Dtype +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.SumTDim + + +

+
+
+
+ Full Usage: + this.SumTDim +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?resultType + + : + Dtype + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the tensor representing the summation of the tensor along the given dimension + +

+
+
+
+ + dim + + : + int +
+
+
+ + ?resultType + + : + Dtype +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.TanInPlace + + +

+
+
+
+ Full Usage: + this.TanInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise tangent of the tensor + +

+
+
+
+ +

+ + + this.TanT + + +

+
+
+
+ Full Usage: + this.TanT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise tangent of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.TanhInPlace + + +

+
+
+
+ Full Usage: + this.TanhInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by the element-wise tanh of the tensor + +

+
+
+
+ +

+ + + this.TanhT + + +

+
+
+
+ Full Usage: + this.TanhT +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise tanh of the tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.ToArray + + +

+
+
+
+ Full Usage: + this.ToArray +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ + Returns a .NET array object for the values of a non-scalar tensor + +

+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + this.ToScalar + + +

+
+
+
+ Full Usage: + this.ToScalar +
+
+ + Returns: + scalar + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Gets a .NET object representing the value of a scalar tensor + +

+
+
+
+ + Returns: + + scalar +
+
+
+
+
+ +

+ + + this.ToValues + + +

+
+
+
+ Full Usage: + this.ToValues +
+
+ + Returns: + obj + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Get a .NET object for all the values in the tensor. +

+
+

+ The runtime type of the returned object is either a .NET scalar + or array corresponding to the shape and element type of the tensor. +

+
+
+ + Returns: + + obj +
+
+
+
+
+ +

+ + + this.TransposeT + + +

+
+
+
+ Full Usage: + this.TransposeT +
+
+ Parameters: +
    + + + dim0 + + : + int + +
    + + + dim1 + + : + int + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the transpose of the tensor between the given dimensions + +

+
+
+
+ + dim0 + + : + int +
+
+
+ + dim1 + + : + int +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.TransposeT2 + + +

+
+
+
+ Full Usage: + this.TransposeT2 +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the transpose of a 2D tensor + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.UndilateT + + +

+
+
+
+ Full Usage: + this.UndilateT +
+
+ Parameters: +
    + + + dilations + + : + int[] + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the reverse of the dilation of the tensor using the given dilations parameters + +

+
+
+
+ + dilations + + : + int[] +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.UnsqueezeT + + +

+
+
+
+ Full Usage: + this.UnsqueezeT +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the tensor with the same values and a dimension of size 1 inserted before the given dimension. + +

+
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.UnstackT + + +

+
+
+
+ Full Usage: + this.UnstackT +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + RawTensor[] + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Unstack the given tensors along the given dimension + +

+
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + RawTensor[] +
+
+
+
+
+ +

+ + + this.ViewT + + +

+
+
+
+ Full Usage: + this.ViewT +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + RawTensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Returns the tensor with the same values viewed as a different shape + +

+
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.ZeroLike + + +

+
+
+
+ Full Usage: + this.ZeroLike +
+
+ Parameters: +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a zero tensor for the given configuration settings, defaulting to the configuration settings of the object tensor + +

+
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.ZerosInPlace + + +

+
+
+
+ Full Usage: + this.ZerosInPlace +
+
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ + Modifies the tensor by setting all values to zero + +

+
+
+
+ +

+ + + this.ZerosLike + + +

+
+
+
+ Full Usage: + this.ZerosLike +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with zero values for the given shape and configuration settings, + defaulting to the configuration settings of the object tensor + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + RawTensor.Create(values, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.Create(values, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + values + + : + obj + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with values drawn from the given .NET object. + +

+
+

+ + The value may be a scalar, an array, or an array of tupled objects. If the dtype is not specified + then it is inferred from the .NET type of the object. + +

+
+
+ + values + + : + obj +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.CreateFromFlatArray(values, shape, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.CreateFromFlatArray(values, shape, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + values + + : + Array + +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + values + + : + Array +
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.Empty(shape, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.Empty(shape, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor containing arbitrary values for the given shape and configuration + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.Full(shape, value, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.Full(shape, value, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + value + + : + scalar + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with the given value for the given shape and configuration + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + value + + : + scalar +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.One(?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.One(?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets the scalar 1 tensor for the given configuration + +

+
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.Ones(shape, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.Ones(shape, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with 1 values for the given shape and configuration + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.Random(shape, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.Random(shape, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random values for the given shape and configuration + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.RandomInt(shape, low, high, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.RandomInt(shape, low, high, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + low + + : + int + +
    + + + high + + : + int + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random integer values from the given range for the given shape and configuration + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + low + + : + int +
+
+
+ + high + + : + int +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.RandomNormal(shape, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.RandomNormal(shape, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets a tensor filled with random values from the normal distribution for the given shape and configuration + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.Zero(?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.Zero(?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets the scalar zero tensor for the given configuration + +

+
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + RawTensor.Zeros(shape, ?dtype, ?device, ?backend) + + +

+
+
+
+ Full Usage: + RawTensor.Zeros(shape, ?dtype, ?device, ?backend) +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + ?dtype + + : + Dtype + +
    + + + ?device + + : + Device + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets the zero tensor for the given shape and configuration + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?device + + : + Device +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-backends.html b/reference/furnace-backends.html new file mode 100644 index 00000000..95de1426 --- /dev/null +++ b/reference/furnace-backends.html @@ -0,0 +1,214 @@ + + + + + Furnace.Backends + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Furnace.Backends Namespace +

+
+

+ Contains types and functionality related to backend implementations for Furnace. +

+
+ + + + + + + + + + + + + + + + + +
+ Type + + Description +
+

+ + + BackendTensorStatics + + +

+
+
+ + + + + + +

+ + Represents the static functionality for tensors implemented by a Furnace backend. + +

+
+
+

+ + + RawTensor + + +

+
+
+ + + + + + +

+ + Represents a raw (i.e. non-differentiable immutable) tensor implemented by a Furnace backend. + +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-binaryop.html b/reference/furnace-binaryop.html new file mode 100644 index 00000000..f79b49fe --- /dev/null +++ b/reference/furnace-binaryop.html @@ -0,0 +1,1096 @@ + + + + + BinaryOp (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ BinaryOp Type +

+ +
+
+

+ Defines a new op implementing a binary function and its derivatives. Instances of this class are used with the Tensor.Op method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. +

+
+

+

This type represents the most generic definition of a new op representing a binary function, allowing the specification of: (1) the RawTensor operation, (2) the derivative propagation rule for the forward differentiation mode and (3) the derivative propagation rule for the reverse differentiation mode.

In general, if you are implementing a simple elementwise op, you should prefer using the BinaryOpElementwise type, which is much simpler to use.

+

+
+ Example +
+

+

+ { new BinaryOp("matmul") with
+     member _.fRaw(a,b) = a.MatMulTT(b)
+     member _.ad_dfda(a,ad,b,f) = ad.matmul(b)
+     member _.bd_dfdb(a,b,bd,f) = a.matmul(bd)
+     member _.fd_dfda(a,b,f,fd) = fd.matmul(b.transpose())
+     member _.fd_dfdb(a,b,f,fd) = a.transpose().matmul(fd)
+ }
+

+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + BinaryOp(name) + + +

+
+
+
+ Full Usage: + BinaryOp(name) +
+
+ Parameters: +
    + + + name + + : + string + +
    +
+
+ + Returns: + BinaryOp + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + name + + : + string +
+
+
+
+
+ + Returns: + + BinaryOp +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + this.ad_dfda (a, ad, b, f) + + +

+
+
+
+ Full Usage: + this.ad_dfda (a, ad, b, f) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first argument \( a \). + +
    + + + ad + + : + Tensor + - + The first argument's derivative \( \frac{\partial a}{\partial x} \). + +
    + + + b + + : + Tensor + - + The second argument \( b \). + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial a}{\partial x} \frac{\partial f(a, b)}{\partial a} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative propagation rule for forward differentiation mode for the partial derivative with respect to the first argument of the function. This represents the contribution of the function's first argument \( a \) to the derivative of \( f(a, b) \) with respect to a value \( x \) earlier in the computation graph than the function's arguments. In other words, it computes the first term in the right-hand side of the equation \( \frac{\partial f(a, b)}{\partial x} = \frac{\partial a}{\partial x} \frac{\partial f(a, b)}{\partial a} + \frac{\partial b}{\partial x} \frac{\partial f(a, b)}{\partial b} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first argument \( a \). +

+
+
+ + ad + + : + Tensor +
+
+

+ The first argument's derivative \( \frac{\partial a}{\partial x} \). +

+
+
+ + b + + : + Tensor +
+
+

+ The second argument \( b \). +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial a}{\partial x} \frac{\partial f(a, b)}{\partial a} \). +

+
+
+
+
+ +

+ + + this.bd_dfdb (a, b, bd, f) + + +

+
+
+
+ Full Usage: + this.bd_dfdb (a, b, bd, f) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first argument \( a \). + +
    + + + b + + : + Tensor + - + The second argument \( b \). + +
    + + + bd + + : + Tensor + - + The second argument's derivative \( \frac{\partial b}{\partial x} \). + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial b}{\partial x} \frac{\partial f(a, b)}{\partial b} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative propagation rule for forward differentiation mode for the partial derivative with respect to the second argument of the function. This represents the contribution of the function's second argument \( b \) to the derivative of \( f(a, b) \) with respect to a value \( x \) earlier in the computation graph than the function's arguments. In other words, it computes the second term in the right-hand side of the equation \( \frac{\partial f(a, b)}{\partial x} = \frac{\partial a}{\partial x} \frac{\partial f(a, b)}{\partial a} + \frac{\partial b}{\partial x} \frac{\partial f(a, b)}{\partial b} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first argument \( a \). +

+
+
+ + b + + : + Tensor +
+
+

+ The second argument \( b \). +

+
+
+ + bd + + : + Tensor +
+
+

+ The second argument's derivative \( \frac{\partial b}{\partial x} \). +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial b}{\partial x} \frac{\partial f(a, b)}{\partial b} \). +

+
+
+
+
+ +

+ + + this.fRaw (a, b) + + +

+
+
+
+ Full Usage: + this.fRaw (a, b) +
+
+ Parameters: +
    + + + a + + : + RawTensor + - + The first argument \( a \). + +
    + + + b + + : + RawTensor + - + The second argument \( b \). + +
    +
+
+ + Returns: + RawTensor + + The function's value \( f(a, b) \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ RawTensor operation \( f(a, b) \) performing the op. +

+
+
+
+ + a + + : + RawTensor +
+
+

+ The first argument \( a \). +

+
+
+ + b + + : + RawTensor +
+
+

+ The second argument \( b \). +

+
+
+
+
+ + Returns: + + RawTensor +
+
+

+ The function's value \( f(a, b) \). +

+
+
+
+
+ +

+ + + this.fd_dfda (a, b, f, fd) + + +

+
+
+
+ Full Usage: + this.fd_dfda (a, b, f, fd) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first argument \( a \). + +
    + + + b + + : + Tensor + - + The second argument \( b \). + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    + + + fd + + : + Tensor + - + The derivative with respect to the function's output \( \frac{\partial y}{\partial f(a, b)} \). + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial y}{\partial a} = \frac{\partial y}{\partial f(a, b)} \frac{\partial f(a, b)}{\partial a} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative propagation rule for reverse differentiation mode for the partial derivative with respect to the first argument of the function. This represents the derivative of a value \( y \), which comes later in the computation graph than the function's value \( f(a, b) \), with respect to the function's first argument \( a \). In other words, it computes \( \frac{\partial y}{\partial a} = \frac{\partial y}{\partial f(a, b)} \frac{\partial f(a, b)}{\partial a} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first argument \( a \). +

+
+
+ + b + + : + Tensor +
+
+

+ The second argument \( b \). +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+ + fd + + : + Tensor +
+
+

+ The derivative with respect to the function's output \( \frac{\partial y}{\partial f(a, b)} \). +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial y}{\partial a} = \frac{\partial y}{\partial f(a, b)} \frac{\partial f(a, b)}{\partial a} \). +

+
+
+
+
+ +

+ + + this.fd_dfdb (a, b, f, fd) + + +

+
+
+
+ Full Usage: + this.fd_dfdb (a, b, f, fd) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first argument \( a \). + +
    + + + b + + : + Tensor + - + The second argument \( b \). + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    + + + fd + + : + Tensor + - + The derivative with respect to the function's output \( \frac{\partial y}{\partial f(a, b)} \). + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial y}{\partial b} = \frac{\partial y}{\partial f(a, b)} \frac{\partial f(a, b)}{\partial b} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative propagation rule for reverse differentiation mode for the partial derivative with respect to the second argument of the function. This represents the derivative of a value \( y \), which comes later in the computation graph than the function's value \( f(a, b) \), with respect to the function's second argument \( b \). In other words, it computes \( \frac{\partial y}{\partial b} = \frac{\partial y}{\partial f(a, b)} \frac{\partial f(a, b)}{\partial b} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first argument \( a \). +

+
+
+ + b + + : + Tensor +
+
+

+ The second argument \( b \). +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+ + fd + + : + Tensor +
+
+

+ The derivative with respect to the function's output \( \frac{\partial y}{\partial f(a, b)} \). +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial y}{\partial b} = \frac{\partial y}{\partial f(a, b)} \frac{\partial f(a, b)}{\partial b} \). +

+
+
+
+
+ +

+ + + this.name + + +

+
+
+
+ Full Usage: + this.name +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Name of the op. + +

+
+
+
+ + Returns: + + string +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-binaryopelementwise.html b/reference/furnace-binaryopelementwise.html new file mode 100644 index 00000000..d8112443 --- /dev/null +++ b/reference/furnace-binaryopelementwise.html @@ -0,0 +1,570 @@ + + + + + BinaryOpElementwise (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ BinaryOpElementwise Type +

+ +
+
+

+ Defines a new op implementing an elementwise binary function and its derivatives. Instances of this class are used with the Tensor.Op method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. +

+
+

+ + This type is specialized to elementwise ops. It requires the user to specify only (1) the RawTensor operation and (2) the derivative of the function with respect to each argument. The corresponding derivative propagation rules for the forward and reverse differentiation modes are automatically generated. +

If you are implementing a complex op that is not elementwise, you can use the generic type BinaryOp, which allows you to define the full derivative propagation rules.

+

+
+ Example +
+

+

+ { new BinaryOpElementwise("pow") with
+     member _.fRaw(a,b) = a.PowTT(b)
+     member _.dfda(a,b,f) = b * f / a
+     member _.dfdb(a,b,f) = f * a.log()
+ }
+ 
+ { new BinaryOpElementwise("mul") with
+     member _.fRaw(a,b) = a.MulTT(b)
+     member _.dfda(a,b,f) = b
+     member _.dfdb(a,b,f) = a
+ }
+

+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + BinaryOpElementwise(name) + + +

+
+
+
+ Full Usage: + BinaryOpElementwise(name) +
+
+ Parameters: +
    + + + name + + : + string + +
    +
+
+ + Returns: + BinaryOpElementwise + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + name + + : + string +
+
+
+
+
+ + Returns: + + BinaryOpElementwise +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.dfda (a, b, f) + + +

+
+
+
+ Full Usage: + this.dfda (a, b, f) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first argument \( a \) + +
    + + + b + + : + Tensor + - + The second argument \( b \) + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial f(a, b)}{\partial a} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative of the function with respect to its first argument, \( \frac{\partial f(a, b)}{\partial a} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first argument \( a \) +

+
+
+ + b + + : + Tensor +
+
+

+ The second argument \( b \) +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial f(a, b)}{\partial a} \). +

+
+
+
+
+ +

+ + + this.dfdb (a, b, f) + + +

+
+
+
+ Full Usage: + this.dfdb (a, b, f) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first argument \( a \) + +
    + + + b + + : + Tensor + - + The second argument \( b \) + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial f(a, b)}{\partial b} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative of the function with respect to its second argument, \( \frac{\partial f(a, b)}{\partial b} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first argument \( a \) +

+
+
+ + b + + : + Tensor +
+
+

+ The second argument \( b \) +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a, b) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial f(a, b)}{\partial b} \). +

+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-compose.html b/reference/furnace-compose.html new file mode 100644 index 00000000..96e0bf5b --- /dev/null +++ b/reference/furnace-compose.html @@ -0,0 +1,9924 @@ + + + + + Compose (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Compose Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + FurnaceImage.add b + + +

+
+
+
+ Full Usage: + FurnaceImage.add b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.arangeLike (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.arangeLike (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + endVal + + : + float + +
    + + + ?startVal + + : + float + +
    + + + ?step + + : + float + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + endVal + + : + float +
+
+
+ + ?startVal + + : + float +
+
+
+ + ?step + + : + float +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.arangeLike (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.arangeLike (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + endVal + + : + int + +
    + + + ?startVal + + : + int + +
    + + + ?step + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + endVal + + : + int +
+
+
+ + ?startVal + + : + int +
+
+
+ + ?step + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.bceLoss target + + +

+
+
+
+ Full Usage: + FurnaceImage.bceLoss target +
+
+ Parameters: +
    + + + target + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + target + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.bernoulli (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.bernoulli (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.cast dtype + + +

+
+
+
+ Full Usage: + FurnaceImage.cast dtype +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dtype + + : + Dtype +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.cat dim + + +

+
+
+
+ Full Usage: + FurnaceImage.cat dim +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + seq<Tensor> -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + seq<Tensor> -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.clamp (?low, ?high) + + +

+
+
+
+ Full Usage: + FurnaceImage.clamp (?low, ?high) +
+
+ Parameters: +
    + + + ?low + + : + scalar + +
    + + + ?high + + : + scalar + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?low + + : + scalar +
+
+
+ + ?high + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.conv1d (b, ?stride, ?padding, ?dilation) + + +

+
+
+
+ Full Usage: + FurnaceImage.conv1d (b, ?stride, ?padding, ?dilation) +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.conv2d (b, ?stride, ?strides, ?padding, ?paddings, ?dilation, ?dilations) + + +

+
+
+
+ Full Usage: + FurnaceImage.conv2d (b, ?stride, ?strides, ?padding, ?paddings, ?dilation, ?dilations) +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    + + + ?stride + + : + int + +
    + + + ?strides + + : + seq<int> + +
    + + + ?padding + + : + int + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?dilation + + : + int + +
    + + + ?dilations + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+ + ?stride + + : + int +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?padding + + : + int +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?dilations + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.conv3d (b, ?stride, ?strides, ?padding, ?paddings, ?dilation, ?dilations) + + +

+
+
+
+ Full Usage: + FurnaceImage.conv3d (b, ?stride, ?strides, ?padding, ?paddings, ?dilation, ?dilations) +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    + + + ?stride + + : + int + +
    + + + ?strides + + : + seq<int> + +
    + + + ?padding + + : + int + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?dilation + + : + int + +
    + + + ?dilations + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+ + ?stride + + : + int +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?padding + + : + int +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?dilations + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.convTranspose1d (b, ?stride, ?padding, ?dilation, ?outputPadding) + + +

+
+
+
+ Full Usage: + FurnaceImage.convTranspose1d (b, ?stride, ?padding, ?dilation, ?outputPadding) +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?outputPadding + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?outputPadding + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.convTranspose2d (b, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.convTranspose2d (b, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?outputPadding + + : + int + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?dilations + + : + seq<int> + +
    + + + ?outputPaddings + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?outputPadding + + : + int +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?dilations + + : + seq<int> +
+
+
+ + ?outputPaddings + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.convTranspose3d (b, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.convTranspose3d (b, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?outputPadding + + : + int + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?dilations + + : + seq<int> + +
    + + + ?outputPaddings + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?outputPadding + + : + int +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?dilations + + : + seq<int> +
+
+
+ + ?outputPaddings + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.corrcoef () + + +

+
+
+
+ Full Usage: + FurnaceImage.corrcoef () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.cov (?correction, ?fweights, ?aweights) + + +

+
+
+
+ Full Usage: + FurnaceImage.cov (?correction, ?fweights, ?aweights) +
+
+ Parameters: +
    + + + ?correction + + : + int64 + +
    + + + ?fweights + + : + Tensor + +
    + + + ?aweights + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?correction + + : + int64 +
+
+
+ + ?fweights + + : + Tensor +
+
+
+ + ?aweights + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.crossEntropyLoss target + + +

+
+
+
+ Full Usage: + FurnaceImage.crossEntropyLoss target +
+
+ Parameters: +
    + + + target + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + target + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.diagonal (offset, ?dim1, ?dim2) + + +

+
+
+
+ Full Usage: + FurnaceImage.diagonal (offset, ?dim1, ?dim2) +
+
+ Parameters: +
    + + + offset + + : + int + +
    + + + ?dim1 + + : + int + +
    + + + ?dim2 + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + offset + + : + int +
+
+
+ + ?dim1 + + : + int +
+
+
+ + ?dim2 + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dilate dilations + + +

+
+
+
+ Full Usage: + FurnaceImage.dilate dilations +
+
+ Parameters: +
    + + + dilations + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dilations + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.div b + + +

+
+
+
+ Full Usage: + FurnaceImage.div b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dot b + + +

+
+
+
+ Full Usage: + FurnaceImage.dot b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dropout ?p + + +

+
+
+
+ Full Usage: + FurnaceImage.dropout ?p +
+
+ Parameters: +
    + + + ?p + + : + double + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?p + + : + double +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dropout2d ?p + + +

+
+
+
+ Full Usage: + FurnaceImage.dropout2d ?p +
+
+ Parameters: +
    + + + ?p + + : + double + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?p + + : + double +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dropout3d ?p + + +

+
+
+
+ Full Usage: + FurnaceImage.dropout3d ?p +
+
+ Parameters: +
    + + + ?p + + : + double + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?p + + : + double +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.eq b + + +

+
+
+
+ Full Usage: + FurnaceImage.eq b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.expand shape + + +

+
+
+
+ Full Usage: + FurnaceImage.expand shape +
+
+ Parameters: +
    + + + shape + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + shape + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.expandAs b + + +

+
+
+
+ Full Usage: + FurnaceImage.expandAs b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.flatten (startDim, ?endDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.flatten (startDim, ?endDim) +
+
+ Parameters: +
    + + + startDim + + : + int + +
    + + + ?endDim + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + startDim + + : + int +
+
+
+ + ?endDim + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.flip dims + + +

+
+
+
+ Full Usage: + FurnaceImage.flip dims +
+
+ Parameters: +
    + + + dims + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dims + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fullLike (value, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.fullLike (value, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + value + + : + scalar + +
    + + + ?shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + value + + : + scalar +
+
+
+ + ?shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gather (dim, indices) + + +

+
+
+
+ Full Usage: + FurnaceImage.gather (dim, indices) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + indices + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + indices + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.ge b + + +

+
+
+
+ Full Usage: + FurnaceImage.ge b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gt b + + +

+
+
+
+ Full Usage: + FurnaceImage.gt b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.le b + + +

+
+
+
+ Full Usage: + FurnaceImage.le b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.leakyRelu ?negativeSlope + + +

+
+
+
+ Full Usage: + FurnaceImage.leakyRelu ?negativeSlope +
+
+ Parameters: +
    + + + ?negativeSlope + + : + float + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?negativeSlope + + : + float +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.like (value, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.like (value, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + value + + : + obj + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + value + + : + obj +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.linspaceLike (startVal, endVal, steps, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.linspaceLike (startVal, endVal, steps, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + float + +
    + + + endVal + + : + float + +
    + + + steps + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + startVal + + : + float +
+
+
+ + endVal + + : + float +
+
+
+ + steps + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.linspaceLike (startVal, endVal, steps, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.linspaceLike (startVal, endVal, steps, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + int + +
    + + + endVal + + : + int + +
    + + + steps + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + startVal + + : + int +
+
+
+ + endVal + + : + int +
+
+
+ + steps + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.logsoftmax dim + + +

+
+
+
+ Full Usage: + FurnaceImage.logsoftmax dim +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.logspaceLike (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.logspaceLike (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + float + +
    + + + endVal + + : + float + +
    + + + steps + + : + int + +
    + + + ?baseVal + + : + float + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + startVal + + : + float +
+
+
+ + endVal + + : + float +
+
+
+ + steps + + : + int +
+
+
+ + ?baseVal + + : + float +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.logspaceLike (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.logspaceLike (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + int + +
    + + + endVal + + : + int + +
    + + + steps + + : + int + +
    + + + ?baseVal + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + startVal + + : + int +
+
+
+ + endVal + + : + int +
+
+
+ + steps + + : + int +
+
+
+ + ?baseVal + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.logsumexp (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.logsumexp (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.lt b + + +

+
+
+
+ Full Usage: + FurnaceImage.lt b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.matmul b + + +

+
+
+
+ Full Usage: + FurnaceImage.matmul b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool1d (kernelSize, ?stride, ?padding) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool1d (kernelSize, ?stride, ?padding) +
+
+ Parameters: +
    + + + kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool2d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool2d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + ?kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?kernelSizes + + : + seq<int> + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?kernelSizes + + : + seq<int> +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool3d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool3d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + ?kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?kernelSizes + + : + seq<int> + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?kernelSizes + + : + seq<int> +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxunpool1d (indices, kernelSize, ?stride, ?padding, ?outputSize) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxunpool1d (indices, kernelSize, ?stride, ?padding, ?outputSize) +
+
+ Parameters: +
    + + + indices + + : + Tensor + +
    + + + kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?outputSize + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + indices + + : + Tensor +
+
+
+ + kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?outputSize + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxunpool2d (indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxunpool2d (indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) +
+
+ Parameters: +
    + + + indices + + : + Tensor + +
    + + + ?kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?kernelSizes + + : + seq<int> + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?outputSize + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + indices + + : + Tensor +
+
+
+ + ?kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?kernelSizes + + : + seq<int> +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?outputSize + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxunpool3d (indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxunpool3d (indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) +
+
+ Parameters: +
    + + + indices + + : + Tensor + +
    + + + ?kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?kernelSizes + + : + seq<int> + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?outputSize + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + indices + + : + Tensor +
+
+
+ + ?kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?kernelSizes + + : + seq<int> +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?outputSize + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mean (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.mean (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.move (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.move (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mseLoss target + + +

+
+
+
+ Full Usage: + FurnaceImage.mseLoss target +
+
+ Parameters: +
    + + + target + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + target + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mul b + + +

+
+
+
+ Full Usage: + FurnaceImage.mul b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.multinomial (numSamples, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.multinomial (numSamples, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + numSamples + + : + int + - + Number of samples to draw + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor where each row contains numSamples indices sampled from the multinomial probability distribution located in the corresponding row of tensor input. +

+
+

+ + Indices are ordered from left to right according to when each was sampled (first samples are placed in first column). + + If input is a vector, out is a vector of size num_samples. + + If input is a matrix with m rows, the result is a matrix of shape (m × numSamples) + +

+

+ Extended Type: + FurnaceImage +

+
+
+ + numSamples + + : + int +
+
+

+ Number of samples to draw +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.nllLoss target + + +

+
+
+
+ Full Usage: + FurnaceImage.nllLoss target +
+
+ Parameters: +
    + + + target + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + target + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.onehotLike (length, hot, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.onehotLike (length, hot, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + +
    + + + hot + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + length + + : + int +
+
+
+ + hot + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.onesLike (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.onesLike (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.pad paddings + + +

+
+
+
+ Full Usage: + FurnaceImage.pad paddings +
+
+ Parameters: +
    + + + paddings + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + paddings + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.pow b + + +

+
+
+
+ Full Usage: + FurnaceImage.pow b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randLike (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randLike (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randintLike (low, high, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randintLike (low, high, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + low + + : + int + +
    + + + high + + : + int + +
    + + + ?shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + low + + : + int +
+
+
+ + high + + : + int +
+
+
+ + ?shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randnLike (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randnLike (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.repeat (dim, times) + + +

+
+
+
+ Full Usage: + FurnaceImage.repeat (dim, times) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + times + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + times + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.scatter (dim, indices, destinationShape) + + +

+
+
+
+ Full Usage: + FurnaceImage.scatter (dim, indices, destinationShape) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + indices + + : + Tensor + +
    + + + destinationShape + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + indices + + : + Tensor +
+
+
+ + destinationShape + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.slice index + + +

+
+
+
+ Full Usage: + FurnaceImage.slice index +
+
+ Parameters: +
    + + + index + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + index + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.softmax dim + + +

+
+
+
+ Full Usage: + FurnaceImage.softmax dim +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.split (sizes, ?dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.split (sizes, ?dim) +
+
+ Parameters: +
    + + + sizes + + : + seq<int> + +
    + + + ?dim + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor[] + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + sizes + + : + seq<int> +
+
+
+ + ?dim + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor[] +
+
+
+
+
+ +

+ + + FurnaceImage.squeeze ?dim + + +

+
+
+
+ Full Usage: + FurnaceImage.squeeze ?dim +
+
+ Parameters: +
    + + + ?dim + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?dim + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.stack dim + + +

+
+
+
+ Full Usage: + FurnaceImage.stack dim +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + seq<Tensor> -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + seq<Tensor> -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.std (dim, ?keepDim, ?unbiased) + + +

+
+
+
+ Full Usage: + FurnaceImage.std (dim, ?keepDim, ?unbiased) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    + + + ?unbiased + + : + bool + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+ + ?unbiased + + : + bool +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.sub b + + +

+
+
+
+ Full Usage: + FurnaceImage.sub b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.sum (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.sum (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.tensor (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.tensor (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + 'a -> Tensor + +
+ Modifiers: + inline +
+ Type parameters: + 'a +
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + 'a -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.toImage (?pixelMin, ?pixelMax, ?normalize, ?gridCols) + + +

+
+
+
+ Full Usage: + FurnaceImage.toImage (?pixelMin, ?pixelMax, ?normalize, ?gridCols) +
+
+ Parameters: +
    + + + ?pixelMin + + : + double + +
    + + + ?pixelMax + + : + double + +
    + + + ?normalize + + : + bool + +
    + + + ?gridCols + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?pixelMin + + : + double +
+
+
+ + ?pixelMax + + : + double +
+
+
+ + ?normalize + + : + bool +
+
+
+ + ?gridCols + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.toImageString (?pixelMin, ?pixelMax, ?normalize, ?gridCols, ?asciiPalette) + + +

+
+
+
+ Full Usage: + FurnaceImage.toImageString (?pixelMin, ?pixelMax, ?normalize, ?gridCols, ?asciiPalette) +
+
+ Parameters: +
    + + + ?pixelMin + + : + double + +
    + + + ?pixelMax + + : + double + +
    + + + ?normalize + + : + bool + +
    + + + ?gridCols + + : + int + +
    + + + ?asciiPalette + + : + string + +
    +
+
+ + Returns: + Tensor -> string + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + ?pixelMin + + : + double +
+
+
+ + ?pixelMax + + : + double +
+
+
+ + ?normalize + + : + bool +
+
+
+ + ?gridCols + + : + int +
+
+
+ + ?asciiPalette + + : + string +
+
+
+
+
+ + Returns: + + Tensor -> string +
+
+
+
+
+ +

+ + + FurnaceImage.transpose (dim0, dim1) + + +

+
+
+
+ Full Usage: + FurnaceImage.transpose (dim0, dim1) +
+
+ Parameters: +
    + + + dim0 + + : + int + +
    + + + dim1 + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim0 + + : + int +
+
+
+ + dim1 + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.undilate dilations + + +

+
+
+
+ Full Usage: + FurnaceImage.undilate dilations +
+
+ Parameters: +
    + + + dilations + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dilations + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.unflatten (dim, unflattenedShape) + + +

+
+
+
+ Full Usage: + FurnaceImage.unflatten (dim, unflattenedShape) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + unflattenedShape + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + unflattenedShape + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.unsqueeze dim + + +

+
+
+
+ Full Usage: + FurnaceImage.unsqueeze dim +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.unsqueezeAs other + + +

+
+
+
+ Full Usage: + FurnaceImage.unsqueezeAs other +
+
+ Parameters: +
    + + + other + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + other + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.unstack dim + + +

+
+
+
+ Full Usage: + FurnaceImage.unstack dim +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor[] + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor[] +
+
+
+
+
+ +

+ + + FurnaceImage.var (dim, ?keepDim, ?unbiased) + + +

+
+
+
+ Full Usage: + FurnaceImage.var (dim, ?keepDim, ?unbiased) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    + + + ?unbiased + + : + bool + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+ + ?unbiased + + : + bool +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.view shape + + +

+
+
+
+ Full Usage: + FurnaceImage.view shape +
+
+ Parameters: +
    + + + shape + + : + seq<int> + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + shape + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.view shape + + +

+
+
+
+ Full Usage: + FurnaceImage.view shape +
+
+ Parameters: +
    + + + shape + + : + int + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + shape + + : + int +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.viewAs b + + +

+
+
+
+ Full Usage: + FurnaceImage.viewAs b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.zerosLike (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.zerosLike (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-cifar10.html b/reference/furnace-data-cifar10.html new file mode 100644 index 00000000..75803066 --- /dev/null +++ b/reference/furnace-data-cifar10.html @@ -0,0 +1,452 @@ + + + + + CIFAR10 (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ CIFAR10 Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + CIFAR10(path, ?url, ?train, ?transform, ?targetTransform) + + +

+
+
+
+ Full Usage: + CIFAR10(path, ?url, ?train, ?transform, ?targetTransform) +
+
+ Parameters: +
    + + + path + + : + string + +
    + + + ?url + + : + string + +
    + + + ?train + + : + bool + +
    + + + ?transform + + : + Tensor -> Tensor + +
    + + + ?targetTransform + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + CIFAR10 + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + path + + : + string +
+
+
+ + ?url + + : + string +
+
+
+ + ?train + + : + bool +
+
+
+ + ?transform + + : + Tensor -> Tensor +
+
+
+ + ?targetTransform + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + CIFAR10 +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.classNames + + +

+
+
+
+ Full Usage: + this.classNames +
+
+ + Returns: + string[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + string[] +
+
+
+
+
+ +

+ + + this.classes + + +

+
+
+
+ Full Usage: + this.classes +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-cifar100.html b/reference/furnace-data-cifar100.html new file mode 100644 index 00000000..82cb099d --- /dev/null +++ b/reference/furnace-data-cifar100.html @@ -0,0 +1,452 @@ + + + + + CIFAR100 (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ CIFAR100 Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + CIFAR100(path, ?url, ?train, ?transform, ?targetTransform) + + +

+
+
+
+ Full Usage: + CIFAR100(path, ?url, ?train, ?transform, ?targetTransform) +
+
+ Parameters: +
    + + + path + + : + string + +
    + + + ?url + + : + string + +
    + + + ?train + + : + bool + +
    + + + ?transform + + : + Tensor -> Tensor + +
    + + + ?targetTransform + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + CIFAR100 + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + path + + : + string +
+
+
+ + ?url + + : + string +
+
+
+ + ?train + + : + bool +
+
+
+ + ?transform + + : + Tensor -> Tensor +
+
+
+ + ?targetTransform + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + CIFAR100 +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.classNames + + +

+
+
+
+ Full Usage: + this.classNames +
+
+ + Returns: + string[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + string[] +
+
+
+
+
+ +

+ + + this.classes + + +

+
+
+
+ Full Usage: + this.classes +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-dataloader.html b/reference/furnace-data-dataloader.html new file mode 100644 index 00000000..4255077a --- /dev/null +++ b/reference/furnace-data-dataloader.html @@ -0,0 +1,632 @@ + + + + + DataLoader (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ DataLoader Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + DataLoader(dataset, batchSize, ?shuffle, ?dropLast, ?device, ?dtype, ?backend, ?targetDevice, ?targetDtype, ?targetBackend) + + +

+
+
+
+ Full Usage: + DataLoader(dataset, batchSize, ?shuffle, ?dropLast, ?device, ?dtype, ?backend, ?targetDevice, ?targetDtype, ?targetBackend) +
+
+ Parameters: +
    + + + dataset + + : + Dataset + +
    + + + batchSize + + : + int + +
    + + + ?shuffle + + : + bool + +
    + + + ?dropLast + + : + bool + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    + + + ?targetDevice + + : + Device + +
    + + + ?targetDtype + + : + Dtype + +
    + + + ?targetBackend + + : + Backend + +
    +
+
+ + Returns: + DataLoader + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + dataset + + : + Dataset +
+
+
+ + batchSize + + : + int +
+
+
+ + ?shuffle + + : + bool +
+
+
+ + ?dropLast + + : + bool +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+ + ?targetDevice + + : + Device +
+
+
+ + ?targetDtype + + : + Dtype +
+
+
+ + ?targetBackend + + : + Backend +
+
+
+
+
+ + Returns: + + DataLoader +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.batch ?batchSize + + +

+
+
+
+ Full Usage: + this.batch ?batchSize +
+
+ Parameters: +
    + + + ?batchSize + + : + int + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?batchSize + + : + int +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.epoch ?numBatches + + +

+
+
+
+ Full Usage: + this.epoch ?numBatches +
+
+ Parameters: +
    + + + ?numBatches + + : + int + +
    +
+
+ + Returns: + seq<int * Tensor * Tensor> + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?numBatches + + : + int +
+
+
+
+
+ + Returns: + + seq<int * Tensor * Tensor> +
+
+
+
+
+ +

+ + + this.length + + +

+
+
+
+ Full Usage: + this.length +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-dataset.html b/reference/furnace-data-dataset.html new file mode 100644 index 00000000..a2c319c2 --- /dev/null +++ b/reference/furnace-data-dataset.html @@ -0,0 +1,850 @@ + + + + + Dataset (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Dataset Type +

+ +
+
+

+ Represents a dataset. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Dataset() + + +

+
+
+
+ Full Usage: + Dataset() +
+
+ + Returns: + Dataset + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Dataset +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + imin + + : + int option + +
    + + + imax + + : + int option + +
    +
+
+ + Returns: + DatasetSubset + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + imin + + : + int option +
+
+
+ + imax + + : + int option +
+
+
+
+
+ + Returns: + + DatasetSubset +
+
+
+
+
+ +

+ + + this.[i] + + +

+
+
+
+ Full Usage: + this.[i] +
+
+ Parameters: +
    + + + i + + : + int + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + i + + : + int +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.filter predicate + + +

+
+
+
+ Full Usage: + this.filter predicate +
+
+ Parameters: + +
+ + Returns: + DatasetSubset + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + predicate + + : + Tensor -> Tensor -> bool +
+
+
+
+
+ + Returns: + + DatasetSubset +
+
+
+
+
+ +

+ + + this.item arg1 + + +

+
+
+
+ Full Usage: + this.item arg1 +
+
+ Parameters: +
    + + + arg0 + + : + int + +
    +
+
+ + Returns: + Tensor * Tensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + arg0 + + : + int +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.length + + +

+
+
+
+ Full Usage: + this.length +
+
+ + Returns: + int + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.loader (batchSize, ?shuffle, ?dropLast, ?device, ?dtype, ?backend, ?targetDevice, ?targetDtype, ?targetBackend) + + +

+
+
+
+ Full Usage: + this.loader (batchSize, ?shuffle, ?dropLast, ?device, ?dtype, ?backend, ?targetDevice, ?targetDtype, ?targetBackend) +
+
+ Parameters: +
    + + + batchSize + + : + int + +
    + + + ?shuffle + + : + bool + +
    + + + ?dropLast + + : + bool + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    + + + ?targetDevice + + : + Device + +
    + + + ?targetDtype + + : + Dtype + +
    + + + ?targetBackend + + : + Backend + +
    +
+
+ + Returns: + DataLoader + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + batchSize + + : + int +
+
+
+ + ?shuffle + + : + bool +
+
+
+ + ?dropLast + + : + bool +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+ + ?targetDevice + + : + Device +
+
+
+ + ?targetDtype + + : + Dtype +
+
+
+ + ?targetBackend + + : + Backend +
+
+
+
+
+ + Returns: + + DataLoader +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-datasetsubset.html b/reference/furnace-data-datasetsubset.html new file mode 100644 index 00000000..fbad5190 --- /dev/null +++ b/reference/furnace-data-datasetsubset.html @@ -0,0 +1,274 @@ + + + + + DatasetSubset (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ DatasetSubset Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + DatasetSubset(dataset, indices) + + +

+
+
+
+ Full Usage: + DatasetSubset(dataset, indices) +
+
+ Parameters: +
    + + + dataset + + : + Dataset + +
    + + + indices + + : + int[] + +
    +
+
+ + Returns: + DatasetSubset + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + dataset + + : + Dataset +
+
+
+ + indices + + : + int[] +
+
+
+
+
+ + Returns: + + DatasetSubset +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-datautil.html b/reference/furnace-data-datautil.html new file mode 100644 index 00000000..96d6a0b9 --- /dev/null +++ b/reference/furnace-data-datautil.html @@ -0,0 +1,420 @@ + + + + + DataUtil (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ DataUtil Module +

+ +
+
+

+ + Contains auto-opened utilities related to the Furnace programming model. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + download url localFileName + + +

+
+
+
+ Full Usage: + download url localFileName +
+
+ Parameters: +
    + + + url + + : + string + +
    + + + localFileName + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Synchronously downloads the given URL to the given local file. + +

+
+
+
+ + url + + : + string +
+
+
+ + localFileName + + : + string +
+
+
+
+
+ +

+ + + extractTarGz fileName outputDir + + +

+
+
+
+ Full Usage: + extractTarGz fileName outputDir +
+
+ Parameters: +
    + + + fileName + + : + string + +
    + + + outputDir + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + fileName + + : + string +
+
+
+ + outputDir + + : + string +
+
+
+
+
+ +

+ + + extractTarStream stream outputDir + + +

+
+
+
+ Full Usage: + extractTarStream stream outputDir +
+
+ Parameters: +
    + + + stream + + : + Stream + +
    + + + outputDir + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + stream + + : + Stream +
+
+
+ + outputDir + + : + string +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-imagedataset.html b/reference/furnace-data-imagedataset.html new file mode 100644 index 00000000..78495308 --- /dev/null +++ b/reference/furnace-data-imagedataset.html @@ -0,0 +1,452 @@ + + + + + ImageDataset (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ImageDataset Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + ImageDataset(path, ?fileExtension, ?resize, ?transform, ?targetTransform) + + +

+
+
+
+ Full Usage: + ImageDataset(path, ?fileExtension, ?resize, ?transform, ?targetTransform) +
+
+ Parameters: +
    + + + path + + : + string + +
    + + + ?fileExtension + + : + string + +
    + + + ?resize + + : + int * int + +
    + + + ?transform + + : + Tensor -> Tensor + +
    + + + ?targetTransform + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + ImageDataset + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + path + + : + string +
+
+
+ + ?fileExtension + + : + string +
+
+
+ + ?resize + + : + int * int +
+
+
+ + ?transform + + : + Tensor -> Tensor +
+
+
+ + ?targetTransform + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + ImageDataset +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.classNames + + +

+
+
+
+ Full Usage: + this.classNames +
+
+ + Returns: + string[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + string[] +
+
+
+
+
+ +

+ + + this.classes + + +

+
+
+
+ Full Usage: + this.classes +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-mnist.html b/reference/furnace-data-mnist.html new file mode 100644 index 00000000..ae8ffcac --- /dev/null +++ b/reference/furnace-data-mnist.html @@ -0,0 +1,468 @@ + + + + + MNIST (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ MNIST Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + MNIST(path, ?urls, ?train, ?transform, ?targetTransform, ?n) + + +

+
+
+
+ Full Usage: + MNIST(path, ?urls, ?train, ?transform, ?targetTransform, ?n) +
+
+ Parameters: +
    + + + path + + : + string + +
    + + + ?urls + + : + seq<string> + +
    + + + ?train + + : + bool + +
    + + + ?transform + + : + Tensor -> Tensor + +
    + + + ?targetTransform + + : + Tensor -> Tensor + +
    + + + ?n + + : + int + +
    +
+
+ + Returns: + MNIST + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + path + + : + string +
+
+
+ + ?urls + + : + seq<string> +
+
+
+ + ?train + + : + bool +
+
+
+ + ?transform + + : + Tensor -> Tensor +
+
+
+ + ?targetTransform + + : + Tensor -> Tensor +
+
+
+ + ?n + + : + int +
+
+
+
+
+ + Returns: + + MNIST +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.classNames + + +

+
+
+
+ Full Usage: + this.classNames +
+
+ + Returns: + string[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + string[] +
+
+
+
+
+ +

+ + + this.classes + + +

+
+
+
+ Full Usage: + this.classes +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-tensordataset.html b/reference/furnace-data-tensordataset.html new file mode 100644 index 00000000..4f25dcae --- /dev/null +++ b/reference/furnace-data-tensordataset.html @@ -0,0 +1,274 @@ + + + + + TensorDataset (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ TensorDataset Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + TensorDataset(data, target) + + +

+
+
+
+ Full Usage: + TensorDataset(data, target) +
+
+ Parameters: +
    + + + data + + : + Tensor + +
    + + + target + + : + Tensor + +
    +
+
+ + Returns: + TensorDataset + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + data + + : + Tensor +
+
+
+ + target + + : + Tensor +
+
+
+
+
+ + Returns: + + TensorDataset +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data-textdataset.html b/reference/furnace-data-textdataset.html new file mode 100644 index 00000000..ff6c02b9 --- /dev/null +++ b/reference/furnace-data-textdataset.html @@ -0,0 +1,732 @@ + + + + + TextDataset (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ TextDataset Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + TextDataset(text, seqLength, ?chars) + + +

+
+
+
+ Full Usage: + TextDataset(text, seqLength, ?chars) +
+
+ Parameters: +
    + + + text + + : + string + +
    + + + seqLength + + : + int + +
    + + + ?chars + + : + string + +
    +
+
+ + Returns: + TextDataset + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + text + + : + string +
+
+
+ + seqLength + + : + int +
+
+
+ + ?chars + + : + string +
+
+
+
+
+ + Returns: + + TextDataset +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.charToIndex c + + +

+
+
+
+ Full Usage: + this.charToIndex c +
+
+ Parameters: +
    + + + c + + : + char + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + c + + : + char +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.chars + + +

+
+
+
+ Full Usage: + this.chars +
+
+ + Returns: + char[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + char[] +
+
+
+
+
+ +

+ + + this.indexToChar i + + +

+
+
+
+ Full Usage: + this.indexToChar i +
+
+ Parameters: +
    + + + i + + : + int + +
    +
+
+ + Returns: + char + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + i + + : + int +
+
+
+
+
+ + Returns: + + char +
+
+
+
+
+ +

+ + + this.numChars + + +

+
+
+
+ Full Usage: + this.numChars +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.tensorToText tensor + + +

+
+
+
+ Full Usage: + this.tensorToText tensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + this.textToTensor text + + +

+
+
+
+ Full Usage: + this.textToTensor text +
+
+ Parameters: +
    + + + text + + : + string + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + text + + : + string +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-data.html b/reference/furnace-data.html new file mode 100644 index 00000000..a9c59a80 --- /dev/null +++ b/reference/furnace-data.html @@ -0,0 +1,452 @@ + + + + + Furnace.Data + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Furnace.Data Namespace +

+
+

+ Contains datasets and components related to data loading. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type/Module + + Description +
+

+ + + CIFAR10 + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + CIFAR100 + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + DataLoader + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Dataset + + +

+
+
+ + + + + + +

+ Represents a dataset. +

+
+
+

+ + + DatasetSubset + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + DataUtil + + +

+
+
+ + + + + + +

+ + Contains auto-opened utilities related to the Furnace programming model. + +

+
+
+

+ + + ImageDataset + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + MNIST + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + TensorDataset + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + TextDataset + + +

+
+
+ + + + + + +

+ +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-device.html b/reference/furnace-device.html new file mode 100644 index 00000000..96fda7df --- /dev/null +++ b/reference/furnace-device.html @@ -0,0 +1,529 @@ + + + + + Device (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Device Type +

+ +
+
+

+ + Represents a device specification. + +

+
+
+
+
+
+
+

+ Union cases +

+ + + + + + + + + + + + + +
+ Union case + + Description +
+
+ +

+ + + Device(DeviceType, int) + + +

+
+
+
+ Full Usage: + Device(DeviceType, int) +
+
+ Parameters: +
    + + + Item1 + + : + DeviceType + +
    + + + Item2 + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + DeviceType +
+
+
+ + Item2 + + : + int +
+
+
+
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.DeviceIndex + + +

+
+
+
+ Full Usage: + this.DeviceIndex +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.DeviceType + + +

+
+
+
+ Full Usage: + this.DeviceType +
+
+ + Returns: + DeviceType + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + DeviceType +
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + Device.CPU + + +

+
+
+
+ Full Usage: + Device.CPU +
+
+ + Returns: + Device + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Device +
+
+
+
+
+ +

+ + + Device.GPU + + +

+
+
+
+ Full Usage: + Device.GPU +
+
+ + Returns: + Device + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Device +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-devicemodule.html b/reference/furnace-devicemodule.html new file mode 100644 index 00000000..bda63357 --- /dev/null +++ b/reference/furnace-devicemodule.html @@ -0,0 +1,236 @@ + + + + + Device (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Device Module +

+ +
+
+

+ + Contains functions and settings related to device specifications. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + Default + + +

+
+
+
+ Full Usage: + Default +
+
+ + Returns: + Device + +
+
+
+
+
+
+ + + + + + +

+ + Get or set the default device used when creating tensors. Note, use FurnaceImage.config(...) instead. + +

+
+
+
+ + Returns: + + Device +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-devicetype.html b/reference/furnace-devicetype.html new file mode 100644 index 00000000..3f54b1ee --- /dev/null +++ b/reference/furnace-devicetype.html @@ -0,0 +1,711 @@ + + + + + DeviceType (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ DeviceType Type +

+ +
+
+

+ + Represents the type of a device. + +

+
+

+ + The numeric values used are as for LibTorch. + +

+
+
+
+
+
+
+

+ Record fields +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Record Field + + Description +
+
+ +

+ + + CPU + + +

+
+
+
+ Full Usage: + CPU +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + CUDA + + +

+
+
+
+ Full Usage: + CUDA +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + FPGA + + +

+
+
+
+ Full Usage: + FPGA +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + HIP + + +

+
+
+
+ Full Usage: + HIP +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + IDEEP + + +

+
+
+
+ Full Usage: + IDEEP +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + MKLDNN + + +

+
+
+
+ Full Usage: + MKLDNN +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + MSNPU + + +

+
+
+
+ Full Usage: + MSNPU +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + OPENCL + + +

+
+
+
+ Full Usage: + OPENCL +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + OPENGL + + +

+
+
+
+ Full Usage: + OPENGL +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+ +

+ + + XLA + + +

+
+
+
+ Full Usage: + XLA +
+
+ + Field type: + DeviceType + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + DeviceType +
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-distributions-bernoulli.html b/reference/furnace-distributions-bernoulli.html new file mode 100644 index 00000000..ee627e98 --- /dev/null +++ b/reference/furnace-distributions-bernoulli.html @@ -0,0 +1,404 @@ + + + + + Bernoulli (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Bernoulli Type +

+ +
+
+

+ Represents a Bernoulli distribution. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Bernoulli(?probs, ?logits) + + +

+
+
+
+ Full Usage: + Bernoulli(?probs, ?logits) +
+
+ Parameters: +
    + + + ?probs + + : + Tensor + +
    + + + ?logits + + : + Tensor + +
    +
+
+ + Returns: + Bernoulli + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?probs + + : + Tensor +
+
+
+ + ?logits + + : + Tensor +
+
+
+
+
+ + Returns: + + Bernoulli +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.logits + + +

+
+
+
+ Full Usage: + this.logits +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.probs + + +

+
+
+
+ Full Usage: + this.probs +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-distributions-categorical.html b/reference/furnace-distributions-categorical.html new file mode 100644 index 00000000..7729108e --- /dev/null +++ b/reference/furnace-distributions-categorical.html @@ -0,0 +1,404 @@ + + + + + Categorical (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Categorical Type +

+ +
+
+

+ Represents a Categorical distribution. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Categorical(?probs, ?logits) + + +

+
+
+
+ Full Usage: + Categorical(?probs, ?logits) +
+
+ Parameters: +
    + + + ?probs + + : + Tensor + +
    + + + ?logits + + : + Tensor + +
    +
+
+ + Returns: + Categorical + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?probs + + : + Tensor +
+
+
+ + ?logits + + : + Tensor +
+
+
+
+
+ + Returns: + + Categorical +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.logits + + +

+
+
+
+ Full Usage: + this.logits +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.probs + + +

+
+
+
+ Full Usage: + this.probs +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-distributions-distribution-1.html b/reference/furnace-distributions-distribution-1.html new file mode 100644 index 00000000..5be044b4 --- /dev/null +++ b/reference/furnace-distributions-distribution-1.html @@ -0,0 +1,394 @@ + + + + + Distribution<'T> (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Distribution<'T> Type +

+ +
+
+

+ Represents a distribution. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Distribution() + + +

+
+
+
+ Full Usage: + Distribution() +
+
+ + Returns: + Distribution<'T> + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Distribution<'T> +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.logprob arg1 + + +

+
+
+
+ Full Usage: + this.logprob arg1 +
+
+ Parameters: +
    + + + arg0 + + : + 'T + +
    +
+
+ + Returns: + Tensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Returns the log-probability of the distribution +

+
+
+
+ + arg0 + + : + 'T +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sample () + + +

+
+
+
+ Full Usage: + this.sample () +
+
+ + Returns: + 'T + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Samples the distribution +

+
+
+
+ + Returns: + + 'T +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-distributions-empirical-1.html b/reference/furnace-distributions-empirical-1.html new file mode 100644 index 00000000..e00fe5af --- /dev/null +++ b/reference/furnace-distributions-empirical-1.html @@ -0,0 +1,1932 @@ + + + + + Empirical<'T> (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Empirical<'T> Type +

+ +
+
+

+ Represents an Empirical distribution. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Empirical(values, ?weights, ?logWeights, ?combineDuplicates, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + Empirical(values, ?weights, ?logWeights, ?combineDuplicates, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + values + + : + seq<'T> + +
    + + + ?weights + + : + Tensor + +
    + + + ?logWeights + + : + Tensor + +
    + + + ?combineDuplicates + + : + bool + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Empirical<'T> + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + values + + : + seq<'T> +
+
+
+ + ?weights + + : + Tensor +
+
+
+ + ?logWeights + + : + Tensor +
+
+
+ + ?combineDuplicates + + : + bool +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Empirical<'T> +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + start + + : + int option + +
    + + + finish + + : + int option + +
    +
+
+ + Returns: + Empirical<'T> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + start + + : + int option +
+
+
+ + finish + + : + int option +
+
+
+
+
+ + Returns: + + Empirical<'T> +
+
+
+
+
+ +

+ + + this.[i] + + +

+
+
+
+ Full Usage: + this.[i] +
+
+ Parameters: +
    + + + i + + : + int + +
    +
+
+ + Returns: + 'T * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + i + + : + int +
+
+
+
+
+ + Returns: + + 'T * Tensor +
+
+
+
+
+ +

+ + + this.combineDuplicates () + + +

+
+
+
+ Full Usage: + this.combineDuplicates () +
+
+ + Returns: + Empirical<'T> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Empirical<'T> +
+
+
+
+
+ +

+ + + this.effectiveSampleSize + + +

+
+
+
+ Full Usage: + this.effectiveSampleSize +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.expectation f + + +

+
+
+
+ Full Usage: + this.expectation f +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.filter predicate + + +

+
+
+
+ Full Usage: + this.filter predicate +
+
+ Parameters: +
    + + + predicate + + : + 'T -> bool + +
    +
+
+ + Returns: + Empirical<'T> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + predicate + + : + 'T -> bool +
+
+
+
+
+ + Returns: + + Empirical<'T> +
+
+
+
+
+ +

+ + + this.isWeighted + + +

+
+
+
+ Full Usage: + this.isWeighted +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.length + + +

+
+
+
+ Full Usage: + this.length +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.logWeights + + +

+
+
+
+ Full Usage: + this.logWeights +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.map f + + +

+
+
+
+ Full Usage: + this.map f +
+
+ Parameters: +
    + + + f + + : + 'T -> 'a + +
    +
+
+ + Returns: + Empirical<'a> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + 'T -> 'a +
+
+
+
+
+ + Returns: + + Empirical<'a> +
+
+
+
+
+ +

+ + + this.max + + +

+
+
+
+ Full Usage: + this.max +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mean + + +

+
+
+
+ Full Usage: + this.mean +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.min + + +

+
+
+
+ Full Usage: + this.min +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mode + + +

+
+
+
+ Full Usage: + this.mode +
+
+ + Returns: + 'T + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + 'T +
+
+
+
+
+ +

+ + + this.resample (numSamples, ?minIndex, ?maxIndex) + + +

+
+
+
+ Full Usage: + this.resample (numSamples, ?minIndex, ?maxIndex) +
+
+ Parameters: +
    + + + numSamples + + : + int + +
    + + + ?minIndex + + : + int + +
    + + + ?maxIndex + + : + int + +
    +
+
+ + Returns: + Empirical<'T> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + numSamples + + : + int +
+
+
+ + ?minIndex + + : + int +
+
+
+ + ?maxIndex + + : + int +
+
+
+
+
+ + Returns: + + Empirical<'T> +
+
+
+
+
+ +

+ + + this.sample (?minIndex, ?maxIndex) + + +

+
+
+
+ Full Usage: + this.sample (?minIndex, ?maxIndex) +
+
+ Parameters: +
    + + + ?minIndex + + : + int + +
    + + + ?maxIndex + + : + int + +
    +
+
+ + Returns: + 'T + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + ?minIndex + + : + int +
+
+
+ + ?maxIndex + + : + int +
+
+
+
+
+ + Returns: + + 'T +
+
+
+
+
+ +

+ + + this.stddev + + +

+
+
+
+ Full Usage: + this.stddev +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.thin (numSamples, ?minIndex, ?maxIndex) + + +

+
+
+
+ Full Usage: + this.thin (numSamples, ?minIndex, ?maxIndex) +
+
+ Parameters: +
    + + + numSamples + + : + int + +
    + + + ?minIndex + + : + int + +
    + + + ?maxIndex + + : + int + +
    +
+
+ + Returns: + Empirical<'T> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + numSamples + + : + int +
+
+
+ + ?minIndex + + : + int +
+
+
+ + ?maxIndex + + : + int +
+
+
+
+
+ + Returns: + + Empirical<'T> +
+
+
+
+
+ +

+ + + this.unweighted () + + +

+
+
+
+ Full Usage: + this.unweighted () +
+
+ + Returns: + Empirical<'T> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Empirical<'T> +
+
+
+
+
+ +

+ + + this.values + + +

+
+
+
+ Full Usage: + this.values +
+
+ + Returns: + 'T[] + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + 'T[] +
+
+
+
+
+ +

+ + + this.valuesTensor + + +

+
+
+
+ Full Usage: + this.valuesTensor +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.variance + + +

+
+
+
+ Full Usage: + this.variance +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.weights + + +

+
+
+
+ Full Usage: + this.weights +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-distributions-normal.html b/reference/furnace-distributions-normal.html new file mode 100644 index 00000000..fb3afcbc --- /dev/null +++ b/reference/furnace-distributions-normal.html @@ -0,0 +1,274 @@ + + + + + Normal (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Normal Type +

+ +
+
+

+ Represents a normal distribution with the given mean and standard deviation with the mean and standard deviation drawn from the given tensors. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Normal(mean, stddev) + + +

+
+
+
+ Full Usage: + Normal(mean, stddev) +
+
+ Parameters: +
    + + + mean + + : + Tensor + +
    + + + stddev + + : + Tensor + +
    +
+
+ + Returns: + Normal + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + mean + + : + Tensor +
+
+
+ + stddev + + : + Tensor +
+
+
+
+
+ + Returns: + + Normal +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-distributions-tensordistribution.html b/reference/furnace-distributions-tensordistribution.html new file mode 100644 index 00000000..7667ca37 --- /dev/null +++ b/reference/furnace-distributions-tensordistribution.html @@ -0,0 +1,705 @@ + + + + + TensorDistribution (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ TensorDistribution Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + TensorDistribution() + + +

+
+
+
+ Full Usage: + TensorDistribution() +
+
+ + Returns: + TensorDistribution + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + TensorDistribution +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.batchShape + + +

+
+
+
+ Full Usage: + this.batchShape +
+
+ + Returns: + Shape + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Shape +
+
+
+
+
+ +

+ + + this.eventShape + + +

+
+
+
+ Full Usage: + this.eventShape +
+
+ + Returns: + Shape + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Shape +
+
+
+
+
+ +

+ + + this.mean + + +

+
+
+
+ Full Usage: + this.mean +
+
+ + Returns: + Tensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.prob value + + +

+
+
+
+ Full Usage: + this.prob value +
+
+ Parameters: +
    + + + value + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + value + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sample numSamples + + +

+
+
+
+ Full Usage: + this.sample numSamples +
+
+ Parameters: +
    + + + numSamples + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Samples the distribution multiple times +

+
+
+
+ + numSamples + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.stddev + + +

+
+
+
+ Full Usage: + this.stddev +
+
+ + Returns: + Tensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.variance + + +

+
+
+
+ Full Usage: + this.variance +
+
+ + Returns: + Tensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-distributions-uniform.html b/reference/furnace-distributions-uniform.html new file mode 100644 index 00000000..8c4205c4 --- /dev/null +++ b/reference/furnace-distributions-uniform.html @@ -0,0 +1,460 @@ + + + + + Uniform (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Uniform Type +

+ +
+
+

+ Represents a uniform distribution with low and high values drawn from the given tensors. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Uniform(low, high) + + +

+
+
+
+ Full Usage: + Uniform(low, high) +
+
+ Parameters: +
    + + + low + + : + Tensor + +
    + + + high + + : + Tensor + +
    +
+
+ + Returns: + Uniform + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + low + + : + Tensor +
+
+
+ + high + + : + Tensor +
+
+
+
+
+ + Returns: + + Uniform +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.high + + +

+
+
+
+ Full Usage: + this.high +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.low + + +

+
+
+
+ Full Usage: + this.low +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.range + + +

+
+
+
+ Full Usage: + this.range +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-distributions.html b/reference/furnace-distributions.html new file mode 100644 index 00000000..6871f968 --- /dev/null +++ b/reference/furnace-distributions.html @@ -0,0 +1,360 @@ + + + + + Furnace.Distributions + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Furnace.Distributions Namespace +

+
+

+ Contains types and functionality related to probability distributions. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type + + Description +
+

+ + + Bernoulli + + +

+
+
+ + + + + + +

+ Represents a Bernoulli distribution. +

+
+
+

+ + + Categorical + + +

+
+
+ + + + + + +

+ Represents a Categorical distribution. +

+
+
+

+ + + Distribution<'T> + + +

+
+
+ + + + + + +

+ Represents a distribution. +

+
+
+

+ + + Empirical<'T> + + +

+
+
+ + + + + + +

+ Represents an Empirical distribution. +

+
+
+

+ + + Normal + + +

+
+
+ + + + + + +

+ Represents a normal distribution with the given mean and standard deviation with the mean and standard deviation drawn from the given tensors. +

+
+
+

+ + + TensorDistribution + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Uniform + + +

+
+
+ + + + + + +

+ Represents a uniform distribution with low and high values drawn from the given tensors. +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-dtype.html b/reference/furnace-dtype.html new file mode 100644 index 00000000..90d48ac8 --- /dev/null +++ b/reference/furnace-dtype.html @@ -0,0 +1,633 @@ + + + + + Dtype (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Dtype Type +

+ +
+
+

+ + Represents a storage type for elements of a tensor + +

+
+
+
+
+
+
+

+ Union cases +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Union case + + Description +
+
+ +

+ + + BFloat16 + + +

+
+
+
+ Full Usage: + BFloat16 +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 16-bit floating point numbers (bfloat16 variation) + +

+
+
+
+ +

+ + + Bool + + +

+
+
+
+ Full Usage: + Bool +
+
+
+
+
+
+
+ + + + +

+ + Store elements as booleans + +

+
+
+
+ +

+ + + Byte + + +

+
+
+
+ Full Usage: + Byte +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 8-bit unsigned integers + +

+
+
+
+ +

+ + + Float16 + + +

+
+
+
+ Full Usage: + Float16 +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 16-bit floating point numbers + +

+
+
+
+ +

+ + + Float32 + + +

+
+
+
+ Full Usage: + Float32 +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 32-bit floating point numbers + +

+
+
+
+ +

+ + + Float64 + + +

+
+
+
+ Full Usage: + Float64 +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 64-bit floating point numbers + +

+
+
+
+ +

+ + + Int16 + + +

+
+
+
+ Full Usage: + Int16 +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 16-bit signed integers + +

+
+
+
+ +

+ + + Int32 + + +

+
+
+
+ Full Usage: + Int32 +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 32-bit signed integers + +

+
+
+
+ +

+ + + Int64 + + +

+
+
+
+ Full Usage: + Int64 +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 64-bit signed integers + +

+
+
+
+ +

+ + + Int8 + + +

+
+
+
+ Full Usage: + Int8 +
+
+
+
+
+
+
+ + + + +

+ + Store elements as 8-bit integers + +

+
+
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.SummationType + + +

+
+
+
+ Full Usage: + this.SummationType +
+
+ + Returns: + Dtype + +
+
+
+
+
+
+ + + + + + +

+ + Gets the natural result of the Sum(), SumToSize() and Sum(dim) operation on this dtype + +

+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-dtypeautoopens.html b/reference/furnace-dtypeautoopens.html new file mode 100644 index 00000000..75376c02 --- /dev/null +++ b/reference/furnace-dtypeautoopens.html @@ -0,0 +1,808 @@ + + + + + DtypeAutoOpens (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ DtypeAutoOpens Module +

+ +
+
+

+ + Contains global functions and settings related to tensor element types, used when writing backends. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + opNotSupported msg dtype + + +

+
+
+
+ Full Usage: + opNotSupported msg dtype +
+
+ Parameters: +
    + + + msg + + : + string + +
    + + + dtype + + : + Dtype + +
    +
+
+ + Returns: + 's + +
+
+
+
+
+
+ + + + + + +

+ + Raise an exception indicating the given operation is not supported for the given tensor element type. + +

+
+
+
+ + msg + + : + string +
+
+
+ + dtype + + : + Dtype +
+
+
+
+
+ + Returns: + + 's +
+
+
+
+
+ +

+ + + opNotSupported2 msg dtype1 dtype2 + + +

+
+
+
+ Full Usage: + opNotSupported2 msg dtype1 dtype2 +
+
+ Parameters: +
    + + + msg + + : + string + +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    +
+
+ + Returns: + 'a + +
+
+
+
+
+
+ + + + + + +

+ + Raise an exception indicating the given binary operation is not supported for the two given tensor element types. + +

+
+
+
+ + msg + + : + string +
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+
+
+ + Returns: + + 'a +
+
+
+
+
+ +

+ + + opNotSupportedOnDeviceType msg dtype deviceType + + +

+
+
+
+ Full Usage: + opNotSupportedOnDeviceType msg dtype deviceType +
+
+ Parameters: +
    + + + msg + + : + string + +
    + + + dtype + + : + Dtype + +
    + + + deviceType + + : + DeviceType + +
    +
+
+ + Returns: + 'a + +
+
+
+
+
+
+ + + + + + +

+ + Raise an exception indicating the given operation is not supported for the given tensor device type. + +

+
+
+
+ + msg + + : + string +
+
+
+ + dtype + + : + Dtype +
+
+
+ + deviceType + + : + DeviceType +
+
+
+
+
+ + Returns: + + 'a +
+
+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.IsFloatingPoint + + +

+
+
+
+ Full Usage: + this.IsFloatingPoint +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Matches all floating point tensor element types + +

+
+

+ Extended Type: + Dtype +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.IsFloatingPoint + + +

+
+
+
+ Full Usage: + this.IsFloatingPoint +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Matches all floating point tensor element types + +

+
+

+ Extended Type: + Dtype +

+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.IsIntegral + + +

+
+
+
+ Full Usage: + this.IsIntegral +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Matches all integral tensor element types + +

+
+

+ Extended Type: + Dtype +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.IsIntegral + + +

+
+
+
+ Full Usage: + this.IsIntegral +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Matches all integral tensor element types + +

+
+

+ Extended Type: + Dtype +

+
+
+ + Returns: + + bool +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-dtypemodule.html b/reference/furnace-dtypemodule.html new file mode 100644 index 00000000..e6b5e9c6 --- /dev/null +++ b/reference/furnace-dtypemodule.html @@ -0,0 +1,686 @@ + + + + + Dtype (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Dtype Module +

+ +
+
+

+ + Contains functions and settings related to tensor element types + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + Default + + +

+
+
+
+ Full Usage: + Default +
+
+ + Returns: + Dtype + +
+
+
+
+
+
+ + + + + + +

+ + Get or set the default element type used when creating tensors. Only floating point types are supported as the default type. Note, use FurnaceImage.config(...) instead. + +

+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+ +

+ + + divisionType dtype1 dtype2 + + +

+
+
+
+ Full Usage: + divisionType dtype1 dtype2 +
+
+ Parameters: +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    +
+
+ + Returns: + Dtype + +
+
+
+
+
+
+ + + + + + +

+ + Find the Dtype which would result from dividing tensors with dtype1 and dtype2 + +

+
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+ +

+ + + widen dtype1 dtype2 + + +

+
+
+
+ Full Usage: + widen dtype1 dtype2 +
+
+ Parameters: +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    +
+
+ + Returns: + Dtype option + +
+
+
+
+
+
+ + + + + + +

+ + Find the Dtype into which dtype1 and dtype2 can be widened + +

+
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+
+
+ + Returns: + + Dtype option +
+
+
+
+
+
+
+

+ Active patterns +

+ + + + + + + + + + + + + + + + + + + + + +
+ Active pattern + + Description +
+
+ +

+ + + (|FloatingPoint|_|) x + + +

+
+
+
+ Full Usage: + (|FloatingPoint|_|) x +
+
+ Parameters: +
    + + + x + + : + Dtype + +
    +
+
+ + Returns: + unit option + +
+
+
+
+
+
+ + + + + + +

+ + Matches all floating point tensor element types + +

+
+
+
+ + x + + : + Dtype +
+
+
+
+
+ + Returns: + + unit option +
+
+
+
+
+ +

+ + + (|IntegralOrBool|_|) x + + +

+
+
+
+ Full Usage: + (|IntegralOrBool|_|) x +
+
+ Parameters: +
    + + + x + + : + Dtype + +
    +
+
+ + Returns: + unit option + +
+
+
+
+
+
+ + + + + + +

+ + Matches all integral or boolean tensor element types + +

+
+
+
+ + x + + : + Dtype +
+
+
+
+
+ + Returns: + + unit option +
+
+
+
+
+ +

+ + + (|Integral|_|) x + + +

+
+
+
+ Full Usage: + (|Integral|_|) x +
+
+ Parameters: +
    + + + x + + : + Dtype + +
    +
+
+ + Returns: + unit option + +
+
+
+
+
+
+ + + + + + +

+ + Matches all integral tensor element types + +

+
+
+
+ + x + + : + Dtype +
+
+
+
+
+ + Returns: + + unit option +
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-furnaceimage.html b/reference/furnace-furnaceimage.html new file mode 100644 index 00000000..1819664e --- /dev/null +++ b/reference/furnace-furnaceimage.html @@ -0,0 +1,29579 @@ + + + + + FurnaceImage (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ FurnaceImage Type +

+ +
+
+

+ + Tensor operations + +

+
+
+
+
+
+
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + FurnaceImage.abs input + + +

+
+
+
+ Full Usage: + FurnaceImage.abs input +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes the element-wise absolute value of the given input tensor. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + input + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.acos input + + +

+
+
+
+ Full Usage: + FurnaceImage.acos input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the arccosine of the elements of input. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.add (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.add (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Return the element-wise addition of the two tensors. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.arange (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.arange (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + endVal + + : + int + - + The ending value for the set of points. + +
    + + + ?startVal + + : + int + - + The starting value for the set of points. Default: 0. + +
    + + + ?step + + : + int + - + The gap between each pair of adjacent points. Default: 1. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a 1-D tensor of size \(\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil\) + with values from the interval [start, end) taken with common difference step beginning from start. + +

+
+
+
+ + endVal + + : + int +
+
+

+ The ending value for the set of points. +

+
+
+ + ?startVal + + : + int +
+
+

+ The starting value for the set of points. Default: 0. +

+
+
+ + ?step + + : + int +
+
+

+ The gap between each pair of adjacent points. Default: 1. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.arange (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.arange (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + endVal + + : + float + - + The ending value for the set of points. + +
    + + + ?startVal + + : + float + - + The starting value for the set of points. Default: 0. + +
    + + + ?step + + : + float + - + The gap between each pair of adjacent points. Default: 1. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a 1-D tensor of size \(\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil\) + with values from the interval [start, end) taken with common difference step beginning from start. + +

+
+

+ + Non-integer steps may be subject to floating point rounding errors when comparing against end. + +

+
+
+ + endVal + + : + float +
+
+

+ The ending value for the set of points. +

+
+
+ + ?startVal + + : + float +
+
+

+ The starting value for the set of points. Default: 0. +

+
+
+ + ?step + + : + float +
+
+

+ The gap between each pair of adjacent points. Default: 1. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.arangeLike (input, endVal, ?startVal, ?step, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.arangeLike (input, endVal, ?startVal, ?step, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + endVal + + : + int + - + The ending value for the set of points. + +
    + + + ?startVal + + : + int + - + The starting value for the set of points. Default: 0. + +
    + + + ?step + + : + int + - + The gap between each pair of adjacent points. Default: 1. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + A version of FurnaceImage.arange with characteristics based on the input tensor. + +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + endVal + + : + int +
+
+

+ The ending value for the set of points. +

+
+
+ + ?startVal + + : + int +
+
+

+ The starting value for the set of points. Default: 0. +

+
+
+ + ?step + + : + int +
+
+

+ The gap between each pair of adjacent points. Default: 1. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.arangeLike (input, endVal, ?startVal, ?step, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.arangeLike (input, endVal, ?startVal, ?step, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + endVal + + : + float + - + The ending value for the set of points. + +
    + + + ?startVal + + : + float + - + The starting value for the set of points. Default: 0. + +
    + + + ?step + + : + float + - + The gap between each pair of adjacent points. Default: 1. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + A version of FurnaceImage.arange with characteristics based on the input tensor. + +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + endVal + + : + float +
+
+

+ The ending value for the set of points. +

+
+
+ + ?startVal + + : + float +
+
+

+ The starting value for the set of points. Default: 0. +

+
+
+ + ?step + + : + float +
+
+

+ The gap between each pair of adjacent points. Default: 1. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.argmax (input, dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.argmax (input, dim, ?keepDim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the indices of the maximum value of all elements in the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.argmax input + + +

+
+
+
+ Full Usage: + FurnaceImage.argmax input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ Returns the indices of the maximum value of all elements in the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + FurnaceImage.argmin (input, dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.argmin (input, dim, ?keepDim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the indices of the minimum value of all elements in the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.argmin input + + +

+
+
+
+ Full Usage: + FurnaceImage.argmin input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ Returns the indices of the minimum value of all elements in the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + FurnaceImage.asin input + + +

+
+
+
+ Full Usage: + FurnaceImage.asin input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the arcsine of the elements of input. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.atan input + + +

+
+
+
+ Full Usage: + FurnaceImage.atan input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the arctangent of the elements of input. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.backends () + + +

+
+
+
+ Full Usage: + FurnaceImage.backends () +
+
+ + Returns: + Backend list + +
+
+
+
+
+
+ + + + + + +

+ Returns the list of available backends. +

+
+
+
+ + Returns: + + Backend list +
+
+
+
+
+ +

+ + + FurnaceImage.backendsAndDevices () + + +

+
+
+
+ Full Usage: + FurnaceImage.backendsAndDevices () +
+
+ + Returns: + (Backend * Device list) list + +
+
+
+
+
+
+ + + + + + +

+ Returns the list of available backends and devices available for each backend. +

+
+
+
+ + Returns: + + (Backend * Device list) list +
+
+
+
+
+ +

+ + + FurnaceImage.bceLoss (input, target, ?weight, ?reduction) + + +

+
+
+
+ Full Usage: + FurnaceImage.bceLoss (input, target, ?weight, ?reduction) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + target + + : + Tensor + - + The target tensor. + +
    + + + ?weight + + : + Tensor + - + A manual rescaling weight given to the loss of each batch element. + +
    + + + ?reduction + + : + string + - + Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Creates a criterion that measures the Binary Cross Entropy between the target and the output +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + target + + : + Tensor +
+
+

+ The target tensor. +

+
+
+ + ?weight + + : + Tensor +
+
+

+ A manual rescaling weight given to the loss of each batch element. +

+
+
+ + ?reduction + + : + string +
+
+

+ Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.bernoulli (probs, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.bernoulli (probs, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + probs + + : + Tensor + - + The input tensor of probability values for the Bernoulli distribution. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Draws binary random numbers (0 or 1) from a Bernoulli distribution +

+
+
+
+ + probs + + : + Tensor +
+
+

+ The input tensor of probability values for the Bernoulli distribution. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.cast (input, dtype) + + +

+
+
+
+ Full Usage: + FurnaceImage.cast (input, dtype) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dtype + + : + Dtype + - + The desired element type of returned tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Convert the tensor to one with the given element type. +

+
+

+ If the element type is unchanged the input tensor will be returned. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.cat (tensors, ?dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.cat (tensors, ?dim) +
+
+ Parameters: +
    + + + tensors + + : + seq<Tensor> + - + The sequence of tensors to concatenate. + +
    + + + ?dim + + : + int + - + The dimension over which the tensors are concatenated. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Concatenates the given sequence of tensors in the given dimension. All tensors must either have the same shape (except in the concatenating dimension) or be empty. +

+
+
+
+ + tensors + + : + seq<Tensor> +
+
+

+ The sequence of tensors to concatenate. +

+
+
+ + ?dim + + : + int +
+
+

+ The dimension over which the tensors are concatenated. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.ceil input + + +

+
+
+
+ Full Usage: + FurnaceImage.ceil input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the ceil of the elements of input, the smallest integer greater than or equal to each element. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.clamp (input, ?low, ?high) + + +

+
+
+
+ Full Usage: + FurnaceImage.clamp (input, ?low, ?high) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?low + + : + scalar + - + The lower-bound of the range to be clamped to. + +
    + + + ?high + + : + scalar + - + The upper-bound of the range to be clamped to. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Clamp all elements in input into the range [low..high] and return the resulting tensor +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?low + + : + scalar +
+
+

+ The lower-bound of the range to be clamped to. +

+
+
+ + ?high + + : + scalar +
+
+

+ The upper-bound of the range to be clamped to. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.clone input + + +

+
+
+
+ Full Usage: + FurnaceImage.clone input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the same characteristics and storage cloned. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.config configuration + + +

+
+
+
+ Full Usage: + FurnaceImage.config configuration +
+
+ Parameters: +
    + + + configuration + + : + Device * Dtype * Backend * Printer + - + A tuple of the new default device, default element type, default backend, and default printer. + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Configure the default device, element type, backend, printer. Only floating point dtypes are supported as the default. +

+
+
+
+ + configuration + + : + Device * Dtype * Backend * Printer +
+
+

+ A tuple of the new default device, default element type, default backend, and default printer. +

+
+
+
+
+ +

+ + + FurnaceImage.config () + + +

+
+
+
+ Full Usage: + FurnaceImage.config () +
+
+ + Returns: + Device * Dtype * Backend * Printer + +
+
+
+
+
+
+ + + + + + +

+ Return the current default device, element type, backend, and printer. +

+
+
+
+ + Returns: + + Device * Dtype * Backend * Printer +
+
+
+
+
+ +

+ + + FurnaceImage.config (?device, ?dtype, ?backend, ?printer) + + +

+
+
+
+ Full Usage: + FurnaceImage.config (?device, ?dtype, ?backend, ?printer) +
+
+ Parameters: +
    + + + ?device + + : + Device + - + The new default device. + +
    + + + ?dtype + + : + Dtype + - + The new default element type. Only floating point dtypes are supported as the default. + +
    + + + ?backend + + : + Backend + - + The new default backend. + +
    + + + ?printer + + : + Printer + - + The new default printer. + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Configure the default device, dtype, and/or backend. +

+
+
+
+ + ?device + + : + Device +
+
+

+ The new default device. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The new default element type. Only floating point dtypes are supported as the default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The new default backend. +

+
+
+ + ?printer + + : + Printer +
+
+

+ The new default printer. +

+
+
+
+
+ +

+ + + FurnaceImage.conv1d (input, filters, ?stride, ?padding, ?dilation) + + +

+
+
+
+ Full Usage: + FurnaceImage.conv1d (input, filters, ?stride, ?padding, ?dilation) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit paddings on both sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D convolution over an input signal composed of several input planes +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit paddings on both sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.conv2d (input, filters, ?stride, ?strides, ?padding, ?paddings, ?dilation, ?dilations) + + +

+
+
+
+ Full Usage: + FurnaceImage.conv2d (input, filters, ?stride, ?strides, ?padding, ?paddings, ?dilation, ?dilations) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?strides + + : + seq<int> + - + The strides of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on corresponding sides of the input. + +
    + + + ?paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?dilations + + : + seq<int> + - + The spacings between kernel elements. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D convolution over an input signal composed of several input planes +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on corresponding sides of the input. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?dilations + + : + seq<int> +
+
+

+ The spacings between kernel elements. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.conv3d (input, filters, ?stride, ?strides, ?padding, ?paddings, ?dilation, ?dilations) + + +

+
+
+
+ Full Usage: + FurnaceImage.conv3d (input, filters, ?stride, ?strides, ?padding, ?paddings, ?dilation, ?dilations) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?strides + + : + seq<int> + - + The strides of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on corresponding sides of the input. + +
    + + + ?paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?dilations + + : + seq<int> + - + The spacings between kernel elements. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D convolution over an input signal composed of several input planes +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on corresponding sides of the input. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?dilations + + : + seq<int> +
+
+

+ The spacings between kernel elements. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.convTranspose1d (input, filters, ?stride, ?padding, ?dilation, ?outputPadding) + + +

+
+
+
+ Full Usage: + FurnaceImage.convTranspose1d (input, filters, ?stride, ?padding, ?dilation, ?outputPadding) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on both sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?outputPadding + + : + int + - + The additional size added to one side of each dimension in the output shape. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D transposed convolution operator over an input signal composed of several input planes, sometimes also called 'deconvolution'. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on both sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?outputPadding + + : + int +
+
+

+ The additional size added to one side of each dimension in the output shape. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.convTranspose2d (input, filters, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.convTranspose2d (input, filters, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on both sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?outputPadding + + : + int + - + The additional size added to one side of each dimension in the output shape. + +
    + + + ?strides + + : + seq<int> + - + The strides of the convolving kernel. + +
    + + + ?paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    + + + ?dilations + + : + seq<int> + - + The spacings between kernel elements. + +
    + + + ?outputPaddings + + : + seq<int> + - + The additional sizes added to one side of each dimension in the output shape. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D transposed convolution operator over an input signal composed of several input planes, sometimes also called 'deconvolution'. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on both sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?outputPadding + + : + int +
+
+

+ The additional size added to one side of each dimension in the output shape. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the convolving kernel. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+ + ?dilations + + : + seq<int> +
+
+

+ The spacings between kernel elements. +

+
+
+ + ?outputPaddings + + : + seq<int> +
+
+

+ The additional sizes added to one side of each dimension in the output shape. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.convTranspose3d (input, filters, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.convTranspose3d (input, filters, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on both sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?outputPadding + + : + int + - + The additional size added to one side of each dimension in the output shape. + +
    + + + ?strides + + : + seq<int> + - + The strides of the convolving kernel. + +
    + + + ?paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    + + + ?dilations + + : + seq<int> + - + The spacings between kernel elements. + +
    + + + ?outputPaddings + + : + seq<int> + - + The additional sizes added to one side of each dimension in the output shape. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D transposed convolution operator over an input signal composed of several input planes, sometimes also called 'deconvolution'. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on both sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?outputPadding + + : + int +
+
+

+ The additional size added to one side of each dimension in the output shape. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the convolving kernel. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+ + ?dilations + + : + seq<int> +
+
+

+ The spacings between kernel elements. +

+
+
+ + ?outputPaddings + + : + seq<int> +
+
+

+ The additional sizes added to one side of each dimension in the output shape. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.corrcoef input + + +

+
+
+
+ Full Usage: + FurnaceImage.corrcoef input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + + + The correlation coefficient matrix \(R\) is computed from the covariance + matrix + Returns a square tensor representing the correlation coefficient matrix. + Given a tensor with \(N\) variables \(X=[x_1,x_2,\ldots,x_N]\) the + \(R_{i,j}\) entry on the correlation matrix is the correlation between + \(x_i\) and \(x_j\). + +
+
+
+
+
+
+ + + + + + +

+ + Estimates the Pearson correlation coefficient matrix for the given tensor. The tensor's first + dimension should index variables and the second dimension should + index observations for each variable. + +

+
+

+ + The correlation between variables \(x\) and \(y\) is + \[cor(x,y)= \frac{\sum^{N}_{i = 1}(x_{i} - \mu_x)(y_{i} - \mu_y)}{\sigma_x \sigma_y (N ~-~1)}\] + where \(\mu_x\) and \(\mu_y\) are the sample means and \(\sigma_x\) and \(\sigma_y\) are + the sample standard deviations. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ + The correlation coefficient matrix \(R\) is computed from the covariance + matrix \(C\) as \(R_{i,j} = \frac{C_{i,j}}{\sqrt{C_{i,i} C_{j,j}}}\). + Returns a square tensor representing the correlation coefficient matrix. + Given a tensor with \(N\) variables \(X=[x_1,x_2,\ldots,x_N]\) the + \(R_{i,j}\) entry on the correlation matrix is the correlation between + \(x_i\) and \(x_j\). +

+
+
+
+ Example +
+

+

+ let x = FurnaceImage.tensor([-0.2678; -0.0908; -0.3766;  0.2780])
+ let y = FurnaceImage.tensor([-0.5812;  0.1535;  0.2387;  0.2350])
+ let xy = FurnaceImage.stack([x;y])
+ FurnaceImage.corrcoef(xy)
+ Evaluates to +
+ tensor([[1.0000, 0.3582],
+         [0.3582, 1.0000]])
+

+
+
+ +

+ + + FurnaceImage.cos input + + +

+
+
+
+ Full Usage: + FurnaceImage.cos input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the cosine of the elements of input +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.cosh input + + +

+
+
+
+ Full Usage: + FurnaceImage.cosh input +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the hyperbolic cosine of the elements of input. +

+
+
+
+ + input + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.cov (input, ?correction, ?fweights, ?aweights) + + +

+
+
+
+ Full Usage: + FurnaceImage.cov (input, ?correction, ?fweights, ?aweights) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?correction + + : + int64 + - + Difference between the sample size and the sample degrees of freedom. Defaults to 1 (Bessel's correction). + +
    + + + ?fweights + + : + Tensor + - + Frequency weights represent the number of times each observation was observed. + Should be given as a tensor of integers. Defaults to no weights. + +
    + + + ?aweights + + : + Tensor + - + Relative importance weights, larger weights for observations that + should have a larger effect on the estimate. + Should be given as a tensor of floating point numbers. Defaults to no weights. + +
    +
+
+ + Returns: + Tensor + + Returns a square tensor representing the covariance matrix. + Given a tensor with \(N\) variables \(X=[x_1,x_2,\ldots,x_N]\) the + \(C_{i,j}\) entry on the covariance matrix is the covariance between + \(x_i\) and \(x_j\). + +
+
+
+
+
+
+ + + + + + +

+ + Estimates the covariance matrix of the given tensor. The tensor's first + dimension should index variables and the second dimension should + index observations for each variable. + +

+
+

+ + If no weights are given, the covariance between variables \(x\) and \(y\) is + \[cov(x,y)= \frac{\sum^{N}_{i = 1}(x_{i} - \mu_x)(y_{i} - \mu_y)}{N~-~\text{correction}}\] + where \(\mu_x\) and \(\mu_y\) are the sample means. + + If there are fweights or aweights then the covariance is + \[cov(x,y)=\frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}{\text{normalization factor}}\] + where \(w\) is either fweights or aweights if one weight type is provided. + If both weight types are provided \(w=\text{fweights}\times\text{aweights}\). + \(\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}\) + is the weighted mean of variables. + The normalization factor is \(\sum^{N}_{i=1} w_i\) if only fweights are provided or if aweights are provided and correction=0. + Otherwise if aweights \(aw\) are provided the normalization factor is + \(\sum^N_{i=1} w_i - \text{correction}\times\frac{\sum^N_{i=1} w_i aw_i}{\sum^N_{i=1} w_i}\) + +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?correction + + : + int64 +
+
+

+ Difference between the sample size and the sample degrees of freedom. Defaults to 1 (Bessel's correction). +

+
+
+ + ?fweights + + : + Tensor +
+
+

+ Frequency weights represent the number of times each observation was observed. + Should be given as a tensor of integers. Defaults to no weights. +

+
+
+ + ?aweights + + : + Tensor +
+
+

+ Relative importance weights, larger weights for observations that + should have a larger effect on the estimate. + Should be given as a tensor of floating point numbers. Defaults to no weights. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ Returns a square tensor representing the covariance matrix. + Given a tensor with \(N\) variables \(X=[x_1,x_2,\ldots,x_N]\) the + \(C_{i,j}\) entry on the covariance matrix is the covariance between + \(x_i\) and \(x_j\). + +

+
+
+
+ Example +
+

+

+ let x = FurnaceImage.tensor([0.0;3.4;5.0])
+ let y = FurnaceImage.tensor([1.0;2.3;-3.0])
+ let xy = FurnaceImage.stack([x;y])
+ xy.cov()
+ Evaluates to +
+ tensor([[ 6.5200, -4.0100],
+         [-4.0100,  7.6300]])
+

+
+
+ +

+ + + FurnaceImage.create count value + + +

+
+
+
+ Full Usage: + FurnaceImage.create count value +
+
+ Parameters: +
    + + + count + + : + int + - + The number of elements in the tensor. + +
    + + + value + + : + 'a + - + The initial value for each element of the tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Create a new 1D tensor using the given value for each element. +

+
+
+
+ + count + + : + int +
+
+

+ The number of elements in the tensor. +

+
+
+ + value + + : + 'a +
+
+

+ The initial value for each element of the tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.crossEntropyLoss (input, target, ?weight, ?reduction) + + +

+
+
+
+ Full Usage: + FurnaceImage.crossEntropyLoss (input, target, ?weight, ?reduction) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + target + + : + Tensor + - + The target tensor. + +
    + + + ?weight + + : + Tensor + - + An optional manual rescaling weight given to the loss of each batch element. + +
    + + + ?reduction + + : + string + - + Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ This criterion combines logsoftmax and nllLoss in a single function +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + target + + : + Tensor +
+
+

+ The target tensor. +

+
+
+ + ?weight + + : + Tensor +
+
+

+ An optional manual rescaling weight given to the loss of each batch element. +

+
+
+ + ?reduction + + : + string +
+
+

+ Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.curl f x + + +

+
+
+
+ Full Usage: + FurnaceImage.curl f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.curldivergence f x + + +

+
+
+
+ Full Usage: + FurnaceImage.curldivergence f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.derivative tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.derivative tensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Get the derivative value of the tensor. +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.devices (?backend, ?deviceType) + + +

+
+
+
+ Full Usage: + FurnaceImage.devices (?backend, ?deviceType) +
+
+ Parameters: +
    + + + ?backend + + : + Backend + - + Return information for this backend. Defaults to Backend.Default. + +
    + + + ?deviceType + + : + DeviceType + - + If given, only return devices for this device type. + +
    +
+
+ + Returns: + Device list + +
+
+
+
+
+
+ + + + + + +

+ Returns the list of available devices for a given backend. +

+
+
+
+ + ?backend + + : + Backend +
+
+

+ Return information for this backend. Defaults to Backend.Default. +

+
+
+ + ?deviceType + + : + DeviceType +
+
+

+ If given, only return devices for this device type. +

+
+
+
+
+ + Returns: + + Device list +
+
+
+
+
+ +

+ + + FurnaceImage.diagonal (input, ?offset, ?dim1, ?dim2) + + +

+
+
+
+ Full Usage: + FurnaceImage.diagonal (input, ?offset, ?dim1, ?dim2) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. Must be at least 2-dimensional. + +
    + + + ?offset + + : + int + - + Which diagonal to consider. Default: 0. + +
    + + + ?dim1 + + : + int + - + The first dimension with respect to which to take diagonal. Default: 0. + +
    + + + ?dim2 + + : + int + - + The second dimension with respect to which to take diagonal. Default: 1. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor with the diagonal elements with respect to dim1 and dim2. + The argument offset controls which diagonal to consider. + +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. Must be at least 2-dimensional. +

+
+
+ + ?offset + + : + int +
+
+

+ Which diagonal to consider. Default: 0. +

+
+
+ + ?dim1 + + : + int +
+
+

+ The first dimension with respect to which to take diagonal. Default: 0. +

+
+
+ + ?dim2 + + : + int +
+
+

+ The second dimension with respect to which to take diagonal. Default: 1. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.diff f x + + +

+
+
+
+ Full Usage: + FurnaceImage.diff f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.diff2 f x + + +

+
+
+
+ Full Usage: + FurnaceImage.diff2 f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.diffn n f x + + +

+
+
+
+ Full Usage: + FurnaceImage.diffn n f x +
+
+ Parameters: +
    + + + n + + : + int + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + n + + : + int +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dilate (input, dilations) + + +

+
+
+
+ Full Usage: + FurnaceImage.dilate (input, dilations) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dilations + + : + seq<int> + - + The dilations to use. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Dilate the tensor using the given dilations in each corresponding dimension. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dilations + + : + seq<int> +
+
+

+ The dilations to use. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.div (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.div (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Return the element-wise division of the two tensors. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.divergence f x + + +

+
+
+
+ Full Usage: + FurnaceImage.divergence f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dot (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.dot (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes the dot product (inner product) of two tensors. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dropout (input, ?p) + + +

+
+
+
+ Full Usage: + FurnaceImage.dropout (input, ?p) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?p + + : + double + - + The probability of an element to be zeroed. Default: 0.5. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?p + + : + double +
+
+

+ The probability of an element to be zeroed. Default: 0.5. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dropout2d (input, ?p) + + +

+
+
+
+ Full Usage: + FurnaceImage.dropout2d (input, ?p) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?p + + : + double + - + The probability of an element to be zeroed. Default: 0.5. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Randomly zero out entire channels (a channel is a 2D feature map, e.g., the \(j\)-th channel of the \(i\)-th sample in the batched input is a 2D tensor \(\text{input}[i, j]\)). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?p + + : + double +
+
+

+ The probability of an element to be zeroed. Default: 0.5. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.dropout3d (input, ?p) + + +

+
+
+
+ Full Usage: + FurnaceImage.dropout3d (input, ?p) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?p + + : + double + - + The probability of an element to be zeroed. Default: 0.5. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Randomly zero out entire channels (a channel is a 3D feature map, e.g., the \(j\)-th channel of the \(i\)-th sample in the batched input is a 3D tensor \(\text{input}[i, j]\)). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?p + + : + double +
+
+

+ The probability of an element to be zeroed. Default: 0.5. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.empty (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.empty (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new empty tensor holding no data, for the given element type and configuration +

+
+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.empty (length, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.empty (length, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new uninitialized tensor filled with arbitrary values for the given length, element type and configuration +

+
+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.empty (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.empty (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new uninitialized tensor filled with arbitrary values for the given shape, element type and configuration +

+
+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.eq (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.eq (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean tensor for the element-wise equality comparison of the elements in the two tensors. +

+
+

+ The shapes of input and other don’t need to match, but they must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.evalForwardDiff f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.evalForwardDiff f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.evalForwardDiffs f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.evalForwardDiffs f x v +
+
+ Parameters: + +
+ + Returns: + Tensor[] + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor[] +
+
+
+
+
+ + Returns: + + Tensor[] +
+
+
+
+
+ +

+ + + FurnaceImage.evalReverseDiff f x + + +

+
+
+
+ Full Usage: + FurnaceImage.evalReverseDiff f x +
+
+ Parameters: + +
+ + Returns: + Tensor * (Tensor -> Tensor) + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * (Tensor -> Tensor) +
+
+
+
+
+ +

+ + + FurnaceImage.exp input + + +

+
+
+
+ Full Usage: + FurnaceImage.exp input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the exp function element-wise. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.expand (input, shape) + + +

+
+
+
+ Full Usage: + FurnaceImage.expand (input, shape) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new view of the input tensor with singleton dimensions expanded to a larger size +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.expandAs (input, other) + + +

+
+
+
+ Full Usage: + FurnaceImage.expandAs (input, other) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + other + + : + Tensor + - + The result tensor has the same size as other. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Expand the input tensor to the same size as other tensor +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + other + + : + Tensor +
+
+

+ The result tensor has the same size as other. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.eye (rows, ?cols, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.eye (rows, ?cols, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + rows + + : + int + - + The number of rows + +
    + + + ?cols + + : + int + - + The number of columns with default being n + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. +

+
+
+
+ + rows + + : + int +
+
+

+ The number of rows +

+
+
+ + ?cols + + : + int +
+
+

+ The number of columns with default being n +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fcurl f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fcurl f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fcurldivergence f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fcurldivergence f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fdiff f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fdiff f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fdiff2 f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fdiff2 f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fdiffn n f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fdiffn n f x +
+
+ Parameters: +
    + + + n + + : + int + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + n + + : + int +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fdivergence f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fdivergence f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.ffdiffn n f x + + +

+
+
+
+ Full Usage: + FurnaceImage.ffdiffn n f x +
+
+ Parameters: +
    + + + n + + : + int + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor[] + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + n + + : + int +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor[] +
+
+
+
+
+ +

+ + + FurnaceImage.fgrad f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fgrad f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fgradhessian f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fgradhessian f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fgradhessianv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fgradhessianv f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fgradv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fgradv f x v +
+
+ Parameters: +
    + + + f + + : + Tensor -> Tensor + - + TBD + +
    + + + x + + : + Tensor + - + TBD + +
    + + + v + + : + Tensor + - + TBD + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ The x and v tensors should have the same number of elements. +

+
+
+ + f + + : + Tensor -> Tensor +
+
+

+ TBD +

+
+
+ + x + + : + Tensor +
+
+

+ TBD +

+
+
+ + v + + : + Tensor +
+
+

+ TBD +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fhessian f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fhessian f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fhessianv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fhessianv f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fjacobian f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fjacobian f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fjacobianTv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fjacobianTv f x v +
+
+ Parameters: +
    + + + f + + : + Tensor -> Tensor + - + vector-to-vector function + +
    + + + x + + : + Tensor + - + Point at which the function f will be evaluated, it must have a single dimension. + +
    + + + v + + : + Tensor + - + Vector + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ Original value and transposed Jacobian-vector product of a vector-to-vector function `f`, at point `x`, along vector `v` +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+

+ vector-to-vector function +

+
+
+ + x + + : + Tensor +
+
+

+ Point at which the function f will be evaluated, it must have a single dimension. +

+
+
+ + v + + : + Tensor +
+
+

+ Vector +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fjacobianv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fjacobianv f x v +
+
+ Parameters: +
    + + + f + + : + Tensor -> Tensor + - + TBD + +
    + + + x + + : + Tensor + - + TBD + +
    + + + v + + : + Tensor + - + TBD + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ The x and v tensors should have the same number of elements. +

+
+
+ + f + + : + Tensor -> Tensor +
+
+

+ TBD +

+
+
+ + x + + : + Tensor +
+
+

+ TBD +

+
+
+ + v + + : + Tensor +
+
+

+ TBD +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.flaplacian f x + + +

+
+
+
+ Full Usage: + FurnaceImage.flaplacian f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.flatten (input, ?startDim, ?endDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.flatten (input, ?startDim, ?endDim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?startDim + + : + int + - + The first dim to flatten. + +
    + + + ?endDim + + : + int + - + The last dim to flatten. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Flattens a contiguous range of dims in a tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?startDim + + : + int +
+
+

+ The first dim to flatten. +

+
+
+ + ?endDim + + : + int +
+
+

+ The last dim to flatten. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.flip (input, dims) + + +

+
+
+
+ Full Usage: + FurnaceImage.flip (input, dims) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dims + + : + seq<int> + - + The axis to flip on. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Reverse the order of a n-D tensor along given axis in dims +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dims + + : + seq<int> +
+
+

+ The axis to flip on. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.floor input + + +

+
+
+
+ Full Usage: + FurnaceImage.floor input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the floor of the elements of input, the largest integer less than or equal to each element. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.forwardDiff nestingTag derivative tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.forwardDiff nestingTag derivative tensor +
+
+ Parameters: +
    + + + nestingTag + + : + uint32 + - + The level tag. + +
    + + + derivative + + : + Tensor + - + The derivative of the input. + +
    + + + tensor + + : + Tensor + - + The input. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new tensor suitable for calculating the forward-mode derivative at the given level tag. +

+
+
+
+ + nestingTag + + : + uint32 +
+
+

+ The level tag. +

+
+
+ + derivative + + : + Tensor +
+
+

+ The derivative of the input. +

+
+
+ + tensor + + : + Tensor +
+
+

+ The input. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.full (length, value, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.full (length, value, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + value + + : + scalar + - + The scalar giving the the initial values for the tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor of the given length filled with value, for the given element type and configuration +

+
+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + value + + : + scalar +
+
+

+ The scalar giving the the initial values for the tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.full (shape, value, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.full (shape, value, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    + + + value + + : + scalar + - + The scalar used to form the initial values for the tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor filled with the scalar value, for the given shape, element type and configuration +

+
+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+ + value + + : + scalar +
+
+

+ The scalar used to form the initial values for the tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fullLike (input, value, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.fullLike (input, value, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + value + + : + scalar + - + The scalar giving the the initial values for the tensor. + +
    + + + ?shape + + : + seq<int> + - + The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor filled with the given scalar value with characteristics based on the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + value + + : + scalar +
+
+

+ The scalar giving the the initial values for the tensor. +

+
+
+ + ?shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gather (input, dim, indices) + + +

+
+
+
+ Full Usage: + FurnaceImage.gather (input, dim, indices) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The axis along which to index. + +
    + + + indices + + : + Tensor + - + The the indices of elements to gather. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Gathers values along an axis specified by dim. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The axis along which to index. +

+
+
+ + indices + + : + Tensor +
+
+

+ The the indices of elements to gather. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.ge (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.ge (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean tensor for the element-wise greater-than-or-equal comparison of the elements in the two tensors. +

+
+

+ The shapes of input and other don’t need to match, but they must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.grad f x + + +

+
+
+
+ Full Usage: + FurnaceImage.grad f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gradhessian f x + + +

+
+
+
+ Full Usage: + FurnaceImage.gradhessian f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gradhessianv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.gradhessianv f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gradv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.gradv f x v +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gt (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.gt (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean tensor for the element-wise greater-than comparison of the elements in the two tensors. +

+
+

+ The shapes of input and other don’t need to match, but they must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.hasinf input + + +

+
+
+
+ Full Usage: + FurnaceImage.hasinf input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean indicating if any element of the tensor is infinite. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + FurnaceImage.hasnan input + + +

+
+
+
+ Full Usage: + FurnaceImage.hasnan input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean indicating if any element of the tensor is a not-a-number (NaN) value. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + FurnaceImage.hessian f x + + +

+
+
+
+ Full Usage: + FurnaceImage.hessian f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.hessianv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.hessianv f x v +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.init count initializer + + +

+
+
+
+ Full Usage: + FurnaceImage.init count initializer +
+
+ Parameters: +
    + + + count + + : + int + - + The length of the tensor. + +
    + + + initializer + + : + int -> 'a + - + The function used to initialize each element. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Create a new 1D tensor using the given initializer for each element. +

+
+
+
+ + count + + : + int +
+
+

+ The length of the tensor. +

+
+
+ + initializer + + : + int -> 'a +
+
+

+ The function used to initialize each element. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.init2d length1 length2 initializer + + +

+
+
+
+ Full Usage: + FurnaceImage.init2d length1 length2 initializer +
+
+ Parameters: +
    + + + length1 + + : + int + - + The length of the tensor in the first dimension. + +
    + + + length2 + + : + int + - + The length of the tensor in the second dimension. + +
    + + + initializer + + : + int -> int -> 'a + - + The function used to initialize each element. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Create a new 2D tensor using the given initializer for each element. +

+
+
+
+ + length1 + + : + int +
+
+

+ The length of the tensor in the first dimension. +

+
+
+ + length2 + + : + int +
+
+

+ The length of the tensor in the second dimension. +

+
+
+ + initializer + + : + int -> int -> 'a +
+
+

+ The function used to initialize each element. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.init3d length1 length2 length3 initializer + + +

+
+
+
+ Full Usage: + FurnaceImage.init3d length1 length2 length3 initializer +
+
+ Parameters: +
    + + + length1 + + : + int + - + The length of the tensor in the 1st dimension. + +
    + + + length2 + + : + int + - + The length of the tensor in the 2nd dimension. + +
    + + + length3 + + : + int + - + The length of the tensor in the 3rd dimension. + +
    + + + initializer + + : + int -> int -> int -> 'a + - + The function used to initialize each element. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Create a new 3D tensor using the given initializer for each element. +

+
+
+
+ + length1 + + : + int +
+
+

+ The length of the tensor in the 1st dimension. +

+
+
+ + length2 + + : + int +
+
+

+ The length of the tensor in the 2nd dimension. +

+
+
+ + length3 + + : + int +
+
+

+ The length of the tensor in the 3rd dimension. +

+
+
+ + initializer + + : + int -> int -> int -> 'a +
+
+

+ The function used to initialize each element. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.init4d length1 length2 length3 length4 initializer + + +

+
+
+
+ Full Usage: + FurnaceImage.init4d length1 length2 length3 length4 initializer +
+
+ Parameters: +
    + + + length1 + + : + int + - + The length of the tensor in the 1st dimension. + +
    + + + length2 + + : + int + - + The length of the tensor in the 2nd dimension. + +
    + + + length3 + + : + int + - + The length of the tensor in the 3rd dimension. + +
    + + + length4 + + : + int + - + The length of the tensor in the 4th dimension. + +
    + + + initializer + + : + int -> int -> int -> int -> 'a + - + The function used to initialize each element. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Create a new 4D tensor using the given initializer for each element. +

+
+
+
+ + length1 + + : + int +
+
+

+ The length of the tensor in the 1st dimension. +

+
+
+ + length2 + + : + int +
+
+

+ The length of the tensor in the 2nd dimension. +

+
+
+ + length3 + + : + int +
+
+

+ The length of the tensor in the 3rd dimension. +

+
+
+ + length4 + + : + int +
+
+

+ The length of the tensor in the 4th dimension. +

+
+
+ + initializer + + : + int -> int -> int -> int -> 'a +
+
+

+ The function used to initialize each element. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.isBackendAvailable backend + + +

+
+
+
+ Full Usage: + FurnaceImage.isBackendAvailable backend +
+
+ Parameters: +
    + + + backend + + : + Backend + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ Indicates if a given backend is available. +

+
+
+
+ + backend + + : + Backend +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + FurnaceImage.isCudaAvailable ?backend + + +

+
+
+
+ Full Usage: + FurnaceImage.isCudaAvailable ?backend +
+
+ Parameters: +
    + + + ?backend + + : + Backend + - + Return information for this backend. Defaults to Backend.Default. + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ Indicates if CUDA is available for a given backend. +

+
+
+
+ + ?backend + + : + Backend +
+
+

+ Return information for this backend. Defaults to Backend.Default. +

+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + FurnaceImage.isDeviceAvailable (device, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.isDeviceAvailable (device, ?backend) +
+
+ Parameters: +
    + + + device + + : + Device + - + The requested device. + +
    + + + ?backend + + : + Backend + - + Return information for this backend. Defaults to Backend.Default. + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ Indicates if a given device is available for a given backend. +

+
+
+
+ + device + + : + Device +
+
+

+ The requested device. +

+
+
+ + ?backend + + : + Backend +
+
+

+ Return information for this backend. Defaults to Backend.Default. +

+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + FurnaceImage.isDeviceTypeAvailable (deviceType, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.isDeviceTypeAvailable (deviceType, ?backend) +
+
+ Parameters: +
    + + + deviceType + + : + DeviceType + - + The requested device type. + +
    + + + ?backend + + : + Backend + - + Return information for this backend. Defaults to Backend.Default. + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ Indicates if a given device type is available for a given backend. +

+
+
+
+ + deviceType + + : + DeviceType +
+
+

+ The requested device type. +

+
+
+ + ?backend + + : + Backend +
+
+

+ Return information for this backend. Defaults to Backend.Default. +

+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + FurnaceImage.isTensor value + + +

+
+
+
+ Full Usage: + FurnaceImage.isTensor value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ Indicates if an object is a tensor +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + FurnaceImage.isinf input + + +

+
+
+
+ Full Usage: + FurnaceImage.isinf input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean tensor where each element indicates if the corresponding element in the input tensor is an infinity value. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.isnan input + + +

+
+
+
+ Full Usage: + FurnaceImage.isnan input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean tensor where each element indicates if the corresponding element in the input tensor is a NaN (not-a-number) value. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.jacobian f x + + +

+
+
+
+ Full Usage: + FurnaceImage.jacobian f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.jacobianTv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.jacobianTv f x v +
+
+ Parameters: +
    + + + f + + : + Tensor -> Tensor + - + vector-to-vector function + +
    + + + x + + : + Tensor + - + Point at which the function f will be evaluated, it must have a single dimension. + +
    + + + v + + : + Tensor + - + Vector + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Transposed Jacobian-vector product of a vector-to-vector function `f`, at point `x`, along vector `v` +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+

+ vector-to-vector function +

+
+
+ + x + + : + Tensor +
+
+

+ Point at which the function f will be evaluated, it must have a single dimension. +

+
+
+ + v + + : + Tensor +
+
+

+ Vector +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.jacobianv f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.jacobianv f x v +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.laplacian f x + + +

+
+
+
+ Full Usage: + FurnaceImage.laplacian f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.le (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.le (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Return a boolean tensor for the element-wise less-than-or-equal comparison of the elements in the two tensors. +

+
+

+ The shapes of input and other don’t need to match, but they must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.leakyRelu (input, ?negativeSlope) + + +

+
+
+
+ Full Usage: + FurnaceImage.leakyRelu (input, ?negativeSlope) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?negativeSlope + + : + float + - + Controls the angle of the negative slope. Default: 0.01. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the leaky rectified linear unit function element-wise +

+
+

+ \[\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)\] +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?negativeSlope + + : + float +
+
+

+ Controls the angle of the negative slope. Default: 0.01. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.like (input, value, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.like (input, value, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + value + + : + obj + - + The .NET object giving the the initial values for the tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor based on the given .NET value with characteristics based on the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + value + + : + obj +
+
+

+ The .NET object giving the the initial values for the tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.linspace (startVal, endVal, steps, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.linspace (startVal, endVal, steps, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + int + - + The starting value for the set of points. + +
    + + + endVal + + : + int + - + The ending value for the set of points. + +
    + + + steps + + : + int + - + The size of the returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a 1-D tensor of size steps whose values are evenly spaced from startVal to endVal. The values are going to be: \( + (\text{startVal}, + \text{startVal} + \frac{\text{endVal} - \text{startVal}}{\text{steps} - 1}, + \ldots, + \text{startVal} + (\text{steps} - 2) * \frac{\text{endVal} - \text{startVal}}{\text{steps} - 1}, + \text{endVal}) + \) + +

+
+
+
+ + startVal + + : + int +
+
+

+ The starting value for the set of points. +

+
+
+ + endVal + + : + int +
+
+

+ The ending value for the set of points. +

+
+
+ + steps + + : + int +
+
+

+ The size of the returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.linspace (startVal, endVal, steps, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.linspace (startVal, endVal, steps, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + float + - + The starting value for the set of points. + +
    + + + endVal + + : + float + - + The ending value for the set of points. + +
    + + + steps + + : + int + - + The size of the returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a 1-D tensor of size steps whose values are evenly spaced from startVal to endVal. The values are going to be: \( + (\text{startVal}, + \text{startVal} + \frac{\text{endVal} - \text{startVal}}{\text{steps} - 1}, + \ldots, + \text{startVal} + (\text{steps} - 2) * \frac{\text{endVal} - \text{startVal}}{\text{steps} - 1}, + \text{endVal}) + \) + +

+
+
+
+ + startVal + + : + float +
+
+

+ The starting value for the set of points. +

+
+
+ + endVal + + : + float +
+
+

+ The ending value for the set of points. +

+
+
+ + steps + + : + int +
+
+

+ The size of the returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.load fileName + + +

+
+
+
+ Full Usage: + FurnaceImage.load fileName +
+
+ Parameters: +
    + + + fileName + + : + string + +
    +
+
+ + Returns: + 'b + +
+
+
+
+
+
+ + + + + + +

+ Loads an object from the given file using a bespoke binary format. +

+
+

+ + The format used may change from version to version of Furnace. + +

+
+
+ + fileName + + : + string +
+
+
+
+
+ + Returns: + + 'b +
+
+
+
+
+ +

+ + + FurnaceImage.log input + + +

+
+
+
+ Full Usage: + FurnaceImage.log input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the natural logarithm of the elements of input. +

+
+

+ \[y_{i} = \log_{e} (x_{i})\] +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.log10 input + + +

+
+
+
+ Full Usage: + FurnaceImage.log10 input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the logarithm to the base 10 of the elements of input. +

+
+

+ \[y_{i} = \log_{10} (x_{i})\] +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.logsoftmax (input, dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.logsoftmax (input, dim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + A dimension along which softmax will be computed. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a softmax followed by a logarithm. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ A dimension along which softmax will be computed. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.logspace (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.logspace (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + int + - + The starting value for the set of points. + +
    + + + endVal + + : + int + - + The ending value for the set of points. + +
    + + + steps + + : + int + - + The size of the returned tensor. + +
    + + + ?baseVal + + : + int + - + The base of the logarithm. Default: 10. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a 1-D tensor of size steps whose values are evenly spaced logarithmically from \(\text{baseVal}^{\text{startVal}}\) to \(\text{baseVal}^{\text{endVal}}\). The values are going to be: \( + (\text{baseVal}^{\text{startVal}}, + \text{baseVal}^{(\text{startVal} + \frac{\text{endVal} - \text{startVal}}{ \text{steps} - 1})}, + \ldots, + \text{baseVal}^{(\text{startVal} + (\text{steps} - 2) * \frac{\text{endVal} - \text{startVal}}{ \text{steps} - 1})}, + \text{baseVal}^{\text{endVal}}) + \) + +

+
+
+
+ + startVal + + : + int +
+
+

+ The starting value for the set of points. +

+
+
+ + endVal + + : + int +
+
+

+ The ending value for the set of points. +

+
+
+ + steps + + : + int +
+
+

+ The size of the returned tensor. +

+
+
+ + ?baseVal + + : + int +
+
+

+ The base of the logarithm. Default: 10. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.logspace (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.logspace (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + float + - + The starting value for the set of points. + +
    + + + endVal + + : + float + - + The ending value for the set of points. + +
    + + + steps + + : + int + - + The size of the returned tensor. + +
    + + + ?baseVal + + : + float + - + The base of the logarithm. Default: 10.0. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a 1-D tensor of size steps whose values are evenly spaced logarithmically from \(\text{baseVal}^{\text{startVal}}\) to \(\text{baseVal}^{\text{endVal}}\). The values are going to be: \( + (\text{baseVal}^{\text{startVal}}, + \text{baseVal}^{(\text{startVal} + \frac{\text{endVal} - \text{startVal}}{ \text{steps} - 1})}, + \ldots, + \text{baseVal}^{(\text{startVal} + (\text{steps} - 2) * \frac{\text{endVal} - \text{startVal}}{ \text{steps} - 1})}, + \text{baseVal}^{\text{endVal}}) + \) + +

+
+
+
+ + startVal + + : + float +
+
+

+ The starting value for the set of points. +

+
+
+ + endVal + + : + float +
+
+

+ The ending value for the set of points. +

+
+
+ + steps + + : + int +
+
+

+ The size of the returned tensor. +

+
+
+ + ?baseVal + + : + float +
+
+

+ The base of the logarithm. Default: 10.0. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.logsumexp (input, dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.logsumexp (input, dim, ?keepDim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the log of summed exponentials of each row of the input tensor in the given dimension dim. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.lt (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.lt (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean tensor for the element-wise less-than comparison of the elements in the two tensors. +

+
+

+ The shapes of input and other don’t need to match, but they must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.map mapping tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.map mapping tensor +
+
+ Parameters: +
    + + + mapping + + : + Tensor -> Tensor + - + The function to apply to each element of the tensor. + +
    + + + tensor + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new tensor by mapping a function over all elements of the input tensor. +

+
+
+
+ + mapping + + : + Tensor -> Tensor +
+
+

+ The function to apply to each element of the tensor. +

+
+
+ + tensor + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.map2 mapping tensor1 tensor2 + + +

+
+
+
+ Full Usage: + FurnaceImage.map2 mapping tensor1 tensor2 +
+
+ Parameters: +
    + + + mapping + + : + Tensor -> Tensor -> Tensor + - + The function to apply to each element of the tensor. + +
    + + + tensor1 + + : + Tensor + - + The first input tensor. + +
    + + + tensor2 + + : + Tensor + - + The second input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new tensor by mapping a function over all corresponding elements of two input tensors. +

+
+

+ The shapes of the two tensors must be identical. +

+
+
+ + mapping + + : + Tensor -> Tensor -> Tensor +
+
+

+ The function to apply to each element of the tensor. +

+
+
+ + tensor1 + + : + Tensor +
+
+

+ The first input tensor. +

+
+
+ + tensor2 + + : + Tensor +
+
+

+ The second input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.map3 mapping tensor1 tensor2 tensor3 + + +

+
+
+
+ Full Usage: + FurnaceImage.map3 mapping tensor1 tensor2 tensor3 +
+
+ Parameters: +
    + + + mapping + + : + Tensor -> Tensor -> Tensor -> Tensor + - + The function to apply to each element of the tensor. + +
    + + + tensor1 + + : + Tensor + - + The first input tensor. + +
    + + + tensor2 + + : + Tensor + - + The second input tensor. + +
    + + + tensor3 + + : + Tensor + - + The third input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new tensor by mapping a function over all corresponding elements of three input tensors. +

+
+

+ The shapes of the three tensors must be identical. +

+
+
+ + mapping + + : + Tensor -> Tensor -> Tensor -> Tensor +
+
+

+ The function to apply to each element of the tensor. +

+
+
+ + tensor1 + + : + Tensor +
+
+

+ The first input tensor. +

+
+
+ + tensor2 + + : + Tensor +
+
+

+ The second input tensor. +

+
+
+ + tensor3 + + : + Tensor +
+
+

+ The third input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mapi mapping tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.mapi mapping tensor +
+
+ Parameters: +
    + + + mapping + + : + int[] -> Tensor -> Tensor + - + The function is passed the index of each element. The function to apply to each element of the tensor. + +
    + + + tensor + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new tensor by mapping a function over all elements of the input tensor. +

+
+
+
+ + mapping + + : + int[] -> Tensor -> Tensor +
+
+

+ The function is passed the index of each element. The function to apply to each element of the tensor. +

+
+
+ + tensor + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mapi2 mapping tensor1 tensor2 + + +

+
+
+
+ Full Usage: + FurnaceImage.mapi2 mapping tensor1 tensor2 +
+
+ Parameters: +
    + + + mapping + + : + int[] -> Tensor -> Tensor -> Tensor + - + The function to apply to each element of the tensor. + +
    + + + tensor1 + + : + Tensor + - + The first input tensor. + +
    + + + tensor2 + + : + Tensor + - + The second input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new tensor by mapping a function over all corresponding elements of two input tensors. +

+
+

+ The function is passed the index of each element. The shapes of the two tensors must be identical. +

+
+
+ + mapping + + : + int[] -> Tensor -> Tensor -> Tensor +
+
+

+ The function to apply to each element of the tensor. +

+
+
+ + tensor1 + + : + Tensor +
+
+

+ The first input tensor. +

+
+
+ + tensor2 + + : + Tensor +
+
+

+ The second input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mapi3 mapping tensor1 tensor2 tensor3 + + +

+
+
+
+ Full Usage: + FurnaceImage.mapi3 mapping tensor1 tensor2 tensor3 +
+
+ Parameters: +
    + + + mapping + + : + int[] -> Tensor -> Tensor -> Tensor -> Tensor + - + The function to apply to each element of the tensor. + +
    + + + tensor1 + + : + Tensor + - + The first input tensor. + +
    + + + tensor2 + + : + Tensor + - + The second input tensor. + +
    + + + tensor3 + + : + Tensor + - + The third input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new tensor by mapping a function over all corresponding elements of three input tensors. +

+
+

+ The function is passed the index of each element. The shapes of the three tensors must be identical. +

+
+
+ + mapping + + : + int[] -> Tensor -> Tensor -> Tensor -> Tensor +
+
+

+ The function to apply to each element of the tensor. +

+
+
+ + tensor1 + + : + Tensor +
+
+

+ The first input tensor. +

+
+
+ + tensor2 + + : + Tensor +
+
+

+ The second input tensor. +

+
+
+ + tensor3 + + : + Tensor +
+
+

+ The third input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.matmul (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.matmul (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Matrix product of two tensors. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.max (a, dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.max (a, dim, ?keepDim) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The tensor. + +
    + + + dim + + : + int + - + The dimension. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the maximum value of all elements in the input tensor along the given dimension. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.max (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.max (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Each element of the tensor input is compared with the corresponding element of the tensor other and an element-wise maximum is taken. +

+
+

+ The shapes of input and other don’t need to match, but they must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.max input + + +

+
+
+
+ Full Usage: + FurnaceImage.max input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the maximum value of all elements in the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool1d (input, kernelSize, ?stride, ?padding) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool1d (input, kernelSize, ?stride, ?padding) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D max pooling over an input signal composed of several input planes. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool1di (input, kernelSize, ?stride, ?padding) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool1di (input, kernelSize, ?stride, ?padding) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool2d (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool2d (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D max pooling over an input signal composed of several input planes. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool2di (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool2di (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool3d (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool3d (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D max pooling over an input signal composed of several input planes. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxpool3di (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxpool3di (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxunpool1d (input, indices, kernelSize, ?stride, ?padding, ?outputSize) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxunpool1d (input, indices, kernelSize, ?stride, ?padding, ?outputSize) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + indices + + : + Tensor + - + The indices selected by maxpool1di. + +
    + + + kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?outputSize + + : + seq<int> + - + The targeted output size. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes a partial inverse of maxpool1di +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + indices + + : + Tensor +
+
+

+ The indices selected by maxpool1di. +

+
+
+ + kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?outputSize + + : + seq<int> +
+
+

+ The targeted output size. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxunpool2d (input, indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxunpool2d (input, indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + indices + + : + Tensor + - + The indices selected by maxpool2di. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    + + + ?outputSize + + : + seq<int> + - + The targeted output size. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes a partial inverse of maxpool2di +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + indices + + : + Tensor +
+
+

+ The indices selected by maxpool2di. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+ + ?outputSize + + : + seq<int> +
+
+

+ The targeted output size. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.maxunpool3d (input, indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) + + +

+
+
+
+ Full Usage: + FurnaceImage.maxunpool3d (input, indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + indices + + : + Tensor + - + The indices selected by maxpool3di. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    + + + ?outputSize + + : + seq<int> + - + The targeted output size. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes a partial inverse of maxpool3di +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + indices + + : + Tensor +
+
+

+ The indices selected by maxpool3di. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+ + ?outputSize + + : + seq<int> +
+
+

+ The targeted output size. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mean (input, dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.mean (input, dim, ?keepDim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the mean value of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. +

+
+

+ + If keepdim is true, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s). + +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mean input + + +

+
+
+
+ Full Usage: + FurnaceImage.mean input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the mean value of all elements in the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.min (a, dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.min (a, dim, ?keepDim) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The tensor. + +
    + + + dim + + : + int + - + The dimension. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the minimum value of all elements in the input tensor along the given dimension. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.min (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.min (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Each element of the tensor input is compared with the corresponding element of the tensor other and an element-wise minimum is taken. +

+
+

+ The shapes of input and other don’t need to match, but they must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.min input + + +

+
+
+
+ Full Usage: + FurnaceImage.min input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the minimum value of all elements in the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.move (input, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.move (input, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Move the tensor to a different device, backend and/or change its element type. +

+
+

+ If the characteristics are unchanged the input tensor will be returned. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mseLoss (input, target, ?reduction) + + +

+
+
+
+ Full Usage: + FurnaceImage.mseLoss (input, target, ?reduction) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + target + + : + Tensor + - + The target tensor. + +
    + + + ?reduction + + : + string + - + Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Creates a criterion that measures the mean squared error (squared L2 norm) between each element in the input and the target. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + target + + : + Tensor +
+
+

+ The target tensor. +

+
+
+ + ?reduction + + : + string +
+
+

+ Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.mul (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.mul (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Return the element-wise multiplication of the two tensors. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.multinomial (probs, numSamples, ?normalize, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.multinomial (probs, numSamples, ?normalize, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + probs + + : + Tensor + - + The input tensor containing probabilities. + +
    + + + numSamples + + : + int + - + The number of samples to draw. + +
    + + + ?normalize + + : + bool + - + Indicates whether the probabilities should first be normalized by their sum. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor where each row contains numSamples indices sampled from the multinomial probability distribution located in the corresponding row of tensor input. +

+
+
+
+ + probs + + : + Tensor +
+
+

+ The input tensor containing probabilities. +

+
+
+ + numSamples + + : + int +
+
+

+ The number of samples to draw. +

+
+
+ + ?normalize + + : + bool +
+
+

+ Indicates whether the probabilities should first be normalized by their sum. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.ne (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.ne (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a boolean tensor for the element-wise non-equality comparison of the elements in the two tensors. +

+
+

+ The shapes of input and other don’t need to match, but they must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.neg input + + +

+
+
+
+ Full Usage: + FurnaceImage.neg input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Return the element-wise negation of the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.nelement input + + +

+
+
+
+ Full Usage: + FurnaceImage.nelement input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ Returns the total number of elements in the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + FurnaceImage.nest level + + +

+
+
+
+ Full Usage: + FurnaceImage.nest level +
+
+ Parameters: +
    + + + level + + : + uint32 + - + The new nesting level. + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Set the global nesting level for automatic differentiation. +

+
+
+
+ + level + + : + uint32 +
+
+

+ The new nesting level. +

+
+
+
+
+ +

+ + + FurnaceImage.nest () + + +

+
+
+
+ Full Usage: + FurnaceImage.nest () +
+
+
+
+
+
+
+ + + + + + +

+ Increase the global nesting level for automatic differentiation. +

+
+
+
+ +

+ + + FurnaceImage.nestLevel () + + +

+
+
+
+ Full Usage: + FurnaceImage.nestLevel () +
+
+ + Returns: + uint32 + +
+
+
+
+
+
+ + + + + + +

+ Get the global nesting level for automatic differentiation. +

+
+
+
+ + Returns: + + uint32 +
+
+
+
+
+ +

+ + + FurnaceImage.nestReset () + + +

+
+
+
+ Full Usage: + FurnaceImage.nestReset () +
+
+
+
+
+
+
+ + + + + + +

+ Reset the global nesting level for automatic differentiation to zero. +

+
+
+
+ +

+ + + FurnaceImage.nllLoss (input, target, ?weight, ?reduction) + + +

+
+
+
+ Full Usage: + FurnaceImage.nllLoss (input, target, ?weight, ?reduction) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + target + + : + Tensor + - + The target tensor. + +
    + + + ?weight + + : + Tensor + - + An optional manual rescaling weight given to the loss of each batch element. + +
    + + + ?reduction + + : + string + - + Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ The negative log likelihood loss. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + target + + : + Tensor +
+
+

+ The target tensor. +

+
+
+ + ?weight + + : + Tensor +
+
+

+ An optional manual rescaling weight given to the loss of each batch element. +

+
+
+ + ?reduction + + : + string +
+
+

+ Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.noDiff tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.noDiff tensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + - + The input. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new constant (non-differentiated) tensor. +

+
+
+
+ + tensor + + : + Tensor +
+
+

+ The input. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.normalize input + + +

+
+
+
+ Full Usage: + FurnaceImage.normalize input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Normalizes a vector so all the values are between zero and one (min-max scaling to 0..1). +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.one (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.one (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Get the scalar '1' tensor for the given configuration +

+
+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.oneLike (input, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.oneLike (input, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the '1' scalar tensor with characteristics based on the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.onehot (length, hot, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.onehot (length, hot, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + hot + + : + int + - + The location to set to 1. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a one-hot tensor, with one location set to 1, and all others 0. +

+
+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + hot + + : + int +
+
+

+ The location to set to 1. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.onehotLike (input, length, hot, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.onehotLike (input, length, hot, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + hot + + : + int + - + The location to set to 1. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + A version of FurnaceImage.onehot with characteristics based on the input tensor. + +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + hot + + : + int +
+
+

+ The location to set to 1. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.ones (length, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.ones (length, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor of the given length filled with '1' values for the given element type and configuration +

+
+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.ones (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.ones (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor filled with '1' values for the given shape, element type and configuration +

+
+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.onesLike (input, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.onesLike (input, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + ?shape + + : + seq<int> + - + The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor filled with '1' values with characteristics based on the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + ?shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.pad (input, paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.pad (input, paddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Add zero padding to each side of a tensor +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.permute (input, permutation) + + +

+
+
+
+ Full Usage: + FurnaceImage.permute (input, permutation) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + permutation + + : + seq<int> + - + The desired ordering of dimensions. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the original tensor with its dimensions permuted. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + permutation + + : + seq<int> +
+
+

+ The desired ordering of dimensions. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.pow (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.pow (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Return the element-wise exponentiation of the two tensors. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.primal tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.primal tensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Get the primal value of the tensor. +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.primalDerivative tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.primalDerivative tensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ Get the primal and derivative values of the tensor. +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.rand (length, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.rand (length, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor filled with random numbers from a uniform distribution on the interval [0, 1) +

+
+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.rand (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.rand (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor filled with random numbers from a uniform distribution on the interval [0, 1) +

+
+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randLike (input, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randLike (input, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + ?shape + + : + seq<int> + - + The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor filled with random numbers from a uniform distribution on the interval [0, 1) with characteristics based on the input tensor +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + ?shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randint (low, high, length, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randint (low, high, length, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + low + + : + int + - + Lowest integer to be drawn from the distribution. Default: 0. + +
    + + + high + + : + int + - + One above the highest integer to be drawn from the distribution. + +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor filled with random integers generated uniformly between low (inclusive) and high (exclusive). +

+
+
+
+ + low + + : + int +
+
+

+ Lowest integer to be drawn from the distribution. Default: 0. +

+
+
+ + high + + : + int +
+
+

+ One above the highest integer to be drawn from the distribution. +

+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randint (low, high, shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randint (low, high, shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + low + + : + int + - + Lowest integer to be drawn from the distribution. Default: 0. + +
    + + + high + + : + int + - + One above the highest integer to be drawn from the distribution. + +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor filled with random integers generated uniformly between low (inclusive) and high (exclusive). +

+
+
+
+ + low + + : + int +
+
+

+ Lowest integer to be drawn from the distribution. Default: 0. +

+
+
+ + high + + : + int +
+
+

+ One above the highest integer to be drawn from the distribution. +

+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randintLike (input, low, high, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randintLike (input, low, high, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + low + + : + int + - + Lowest integer to be drawn from the distribution. Default: 0. + +
    + + + high + + : + int + - + One above the highest integer to be drawn from the distribution. + +
    + + + ?shape + + : + seq<int> + - + The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor with the same shape as Tensor input filled with random integers generated uniformly between low (inclusive) and high (exclusive) with characteristics based on the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + low + + : + int +
+
+

+ Lowest integer to be drawn from the distribution. Default: 0. +

+
+
+ + high + + : + int +
+
+

+ One above the highest integer to be drawn from the distribution. +

+
+
+ + ?shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randn (length, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randn (length, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution). +

+
+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randn (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randn (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution). +

+
+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.randnLike (input, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.randnLike (input, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + ?shape + + : + seq<int> + - + The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution) with characteristics based on the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + ?shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.relu input + + +

+
+
+
+ Full Usage: + FurnaceImage.relu input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the rectified linear unit function element-wise. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.repeat (input, dim, times) + + +

+
+
+
+ Full Usage: + FurnaceImage.repeat (input, dim, times) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension along which to repeat values. + +
    + + + times + + : + int + - + The number of repetitions for each element. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Repeat elements of a tensor +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension along which to repeat values. +

+
+
+ + times + + : + int +
+
+

+ The number of repetitions for each element. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.reverse value tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.reverse value tensor +
+
+ Parameters: +
    + + + value + + : + Tensor + - + The value to apply. + +
    + + + tensor + + : + Tensor + - + The output tensor. + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Compute the reverse-mode derivative at the given output tensor. +

+
+
+
+ + value + + : + Tensor +
+
+

+ The value to apply. +

+
+
+ + tensor + + : + Tensor +
+
+

+ The output tensor. +

+
+
+
+
+ +

+ + + FurnaceImage.reverseDiff nestingTag tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.reverseDiff nestingTag tensor +
+
+ Parameters: +
    + + + nestingTag + + : + uint32 + - + The level tag. + +
    + + + tensor + + : + Tensor + - + The output tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Produce a new tensor suitable for calculating the reverse-mode derivative at the given level tag. +

+
+
+
+ + nestingTag + + : + uint32 +
+
+

+ The level tag. +

+
+
+ + tensor + + : + Tensor +
+
+

+ The output tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.reversePush value tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.reversePush value tensor +
+
+ Parameters: +
    + + + value + + : + Tensor + - + The value to apply. + +
    + + + tensor + + : + Tensor + - + The output tensor. + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Push the given value as part of the reverse-mode computation at the given output tensor. +

+
+
+
+ + value + + : + Tensor +
+
+

+ The value to apply. +

+
+
+ + tensor + + : + Tensor +
+
+

+ The output tensor. +

+
+
+
+
+ +

+ + + FurnaceImage.reverseReset tensor + + +

+
+
+
+ Full Usage: + FurnaceImage.reverseReset tensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + - + The output tensor. + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Reset the reverse mode computation associated with the given output tensor. +

+
+
+
+ + tensor + + : + Tensor +
+
+

+ The output tensor. +

+
+
+
+
+ +

+ + + FurnaceImage.round input + + +

+
+
+
+ Full Usage: + FurnaceImage.round input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with each of the elements of input rounded to the closest integer. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.safelog (input, ?epsilon) + + +

+
+
+
+ Full Usage: + FurnaceImage.safelog (input, ?epsilon) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?epsilon + + : + float + - + The smallest value a tensor element can take before the logarithm is applied. Default: 1e-12 + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the logarithm of the tensor after clamping the tensor so that all its elements are greater than epsilon. This is to avoid a -inf result for elements equal to zero. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?epsilon + + : + float +
+
+

+ The smallest value a tensor element can take before the logarithm is applied. Default: 1e-12 +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.save (value, fileName) + + +

+
+
+
+ Full Usage: + FurnaceImage.save (value, fileName) +
+
+ Parameters: +
    + + + value + + : + obj + +
    + + + fileName + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Saves the object to the given file using a bespoke binary format. +

+
+

+ + The format used may change from version to version of Furnace. + +

+
+
+ + value + + : + obj +
+
+
+ + fileName + + : + string +
+
+
+
+
+ +

+ + + FurnaceImage.scalar (value, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.scalar (value, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + value + + : + scalar + - + The scalar giving the initial values for the tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new scalar tensor with the value value, for the given element type and configuration +

+
+
+
+ + value + + : + scalar +
+
+

+ The scalar giving the initial values for the tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.scatter (input, dim, indices, destinationShape) + + +

+
+
+
+ Full Usage: + FurnaceImage.scatter (input, dim, indices, destinationShape) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The axis along which to index. + +
    + + + indices + + : + Tensor + - + The indices of elements to scatter. + +
    + + + destinationShape + + : + seq<int> + - + The destination shape. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Scatters values along an axis specified by dim. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The axis along which to index. +

+
+
+ + indices + + : + Tensor +
+
+

+ The indices of elements to scatter. +

+
+
+ + destinationShape + + : + seq<int> +
+
+

+ The destination shape. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.seed ?seed + + +

+
+
+
+ Full Usage: + FurnaceImage.seed ?seed +
+
+ Parameters: +
    + + + ?seed + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Seeds all backends with the given random seed, or a new seed based on the current time if no seed is specified. +

+
+
+
+ + ?seed + + : + int +
+
+
+
+
+ +

+ + + FurnaceImage.sigmoid input + + +

+
+
+
+ Full Usage: + FurnaceImage.sigmoid input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the sigmoid element-wise function +

+
+

+ \[\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}\] +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.sign input + + +

+
+
+
+ Full Usage: + FurnaceImage.sign input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the signs of the elements of input. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.sin input + + +

+
+
+
+ Full Usage: + FurnaceImage.sin input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the sine of the elements of input +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.sinh input + + +

+
+
+
+ Full Usage: + FurnaceImage.sinh input +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the hyperbolic sine of the elements of input. +

+
+
+
+ + input + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.slice (input, index) + + +

+
+
+
+ Full Usage: + FurnaceImage.slice (input, index) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + index + + : + seq<int> + - + Index describing the slice. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Get a slice of a tensor +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + index + + : + seq<int> +
+
+

+ Index describing the slice. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.softmax (input, dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.softmax (input, dim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + A dimension along which softmax will be computed. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a softmax function. +

+
+

+ Softmax is defined as: \[\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}\] +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ A dimension along which softmax will be computed. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.softplus input + + +

+
+
+
+ Full Usage: + FurnaceImage.softplus input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the softplus function element-wise. +

+
+

+ \[\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))\] +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.split (input, sizes, ?dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.split (input, sizes, ?dim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The tensor to split. + +
    + + + sizes + + : + seq<int> + - + The size of a single chunk or list of sizes for each chunk. + +
    + + + ?dim + + : + int + - + The dimension along which to split the tensor. + +
    +
+
+ + Returns: + Tensor[] + +
+
+
+
+
+
+ + + + + + +

+ Splits the tensor into chunks. The tensor will be split into sizes.Length chunks each with a corresponding size in the given dimension. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The tensor to split. +

+
+
+ + sizes + + : + seq<int> +
+
+

+ The size of a single chunk or list of sizes for each chunk. +

+
+
+ + ?dim + + : + int +
+
+

+ The dimension along which to split the tensor. +

+
+
+
+
+ + Returns: + + Tensor[] +
+
+
+
+
+ +

+ + + FurnaceImage.sqrt input + + +

+
+
+
+ Full Usage: + FurnaceImage.sqrt input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the square-root of the elements of input. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.squeeze (input, ?dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.squeeze (input, ?dim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?dim + + : + int + - + If given, the input will be squeezed only in this dimension. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor with all the dimensions of input of size 1 removed. +

+
+

+ If the tensor has a batch dimension of size 1, then squeeze(input) will also remove the batch dimension, which can lead to unexpected errors. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?dim + + : + int +
+
+

+ If given, the input will be squeezed only in this dimension. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.stack (tensors, ?dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.stack (tensors, ?dim) +
+
+ Parameters: +
    + + + tensors + + : + seq<Tensor> + - + The sequence of tensors to concatenate. + +
    + + + ?dim + + : + int + - + The dimension to insert. Has to be between 0 and the number of dimensions of concatenated tensors (inclusive). + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Concatenates sequence of tensors along a new dimension +

+
+

+ All tensors need to be of the same size. +

+
+
+ + tensors + + : + seq<Tensor> +
+
+

+ The sequence of tensors to concatenate. +

+
+
+ + ?dim + + : + int +
+
+

+ The dimension to insert. Has to be between 0 and the number of dimensions of concatenated tensors (inclusive). +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.standardize input + + +

+
+
+
+ Full Usage: + FurnaceImage.standardize input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the tensor after standardization (z-score normalization) +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.std (input, dim, ?keepDim, ?unbiased) + + +

+
+
+
+ Full Usage: + FurnaceImage.std (input, dim, ?keepDim, ?unbiased) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    + + + ?unbiased + + : + bool + - + Whether to use the unbiased estimation or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the standard deviation of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. +

+
+

+ + If keepdim is true, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s). + If unbiased is False, then the standard deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used. + +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+ + ?unbiased + + : + bool +
+
+

+ Whether to use the unbiased estimation or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.std (input, ?unbiased) + + +

+
+
+
+ Full Usage: + FurnaceImage.std (input, ?unbiased) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?unbiased + + : + bool + - + Whether to use the unbiased estimation or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the standard deviation of all elements in the input tensor. +

+
+

+ + If unbiased is False, then the standard deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used. + +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?unbiased + + : + bool +
+
+

+ Whether to use the unbiased estimation or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.sub (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.sub (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Return the element-wise subtraction of the two tensors. +

+
+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.sum (input, dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.sum (input, dim, ?keepDim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the sum of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. +

+
+

+ + If keepdim is true, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s). + +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.sum input + + +

+
+
+
+ Full Usage: + FurnaceImage.sum input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the sum of all elements in the input tensor +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.tan input + + +

+
+
+
+ Full Usage: + FurnaceImage.tan input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the tangent of the elements of input +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.tanh input + + +

+
+
+
+ Full Usage: + FurnaceImage.tanh input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the hyperbolic tangent of the elements of input. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.tensor (value, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.tensor (value, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + value + + : + obj + - + The .NET object used to form the initial values for the tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Creates a new tensor from the given data, using the given element type and configuration. + +

+
+

+ + The data is converted from arrays, sequences, lists and tuples of primitive values to a tensor whose shape is inferred from the data. + The fastest creation technique is a one dimensional array matching the desired dtype. Then use 'view' to reshape. +

+
+
+ + value + + : + obj +
+
+

+ The .NET object used to form the initial values for the tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+ Example +
+

+

+    let t1 = FurnaceImage.tensor [ 1 .. 10 ]
+    let t2 = FurnaceImage.tensor [ [ 1.0; 3.0; 4.0 ];
+                             [ 1.02; 3.04; 4.01 ] ]
+

+
+
+ +

+ + + FurnaceImage.toImage (input, ?pixelMin, ?pixelMax, ?normalize, ?gridCols) + + +

+
+
+
+ Full Usage: + FurnaceImage.toImage (input, ?pixelMin, ?pixelMax, ?normalize, ?gridCols) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?pixelMin + + : + double + - + The minimum pixel value. + +
    + + + ?pixelMax + + : + double + - + The maximum pixel value. + +
    + + + ?normalize + + : + bool + - + If True, shift the image to the range (0, 1), by the min and max values specified by range. + +
    + + + ?gridCols + + : + int + - + Number of columns of images in the grid. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Convert tensor to an image tensor with shape Channels x Height x Width +

+
+

+ If the input tensor has 4 dimensions, then make a single image grid. +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?pixelMin + + : + double +
+
+

+ The minimum pixel value. +

+
+
+ + ?pixelMax + + : + double +
+
+

+ The maximum pixel value. +

+
+
+ + ?normalize + + : + bool +
+
+

+ If True, shift the image to the range (0, 1), by the min and max values specified by range. +

+
+
+ + ?gridCols + + : + int +
+
+

+ Number of columns of images in the grid. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.toImageString (input, ?pixelMin, ?pixelMax, ?normalize, ?gridCols, ?asciiPalette) + + +

+
+
+
+ Full Usage: + FurnaceImage.toImageString (input, ?pixelMin, ?pixelMax, ?normalize, ?gridCols, ?asciiPalette) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?pixelMin + + : + double + - + The minimum pixel value. + +
    + + + ?pixelMax + + : + double + - + The maximum pixel value. + +
    + + + ?normalize + + : + bool + - + If True, shift the image to the range (0, 1), by the min and max values specified by range. + +
    + + + ?gridCols + + : + int + - + Number of columns of images in the grid. + +
    + + + ?asciiPalette + + : + string + - + The ASCII palette to use. + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ Convert tensor to a grayscale image tensor and return a string representation approximating grayscale values +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?pixelMin + + : + double +
+
+

+ The minimum pixel value. +

+
+
+ + ?pixelMax + + : + double +
+
+

+ The maximum pixel value. +

+
+
+ + ?normalize + + : + bool +
+
+

+ If True, shift the image to the range (0, 1), by the min and max values specified by range. +

+
+
+ + ?gridCols + + : + int +
+
+

+ Number of columns of images in the grid. +

+
+
+ + ?asciiPalette + + : + string +
+
+

+ The ASCII palette to use. +

+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + FurnaceImage.trace input + + +

+
+
+
+ Full Usage: + FurnaceImage.trace input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the sum of the elements of the diagonal of the input 2-D matrix +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.transpose input + + +

+
+
+
+ Full Usage: + FurnaceImage.transpose input +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor that is a transposed version of input with dimensions 0 and 1 swapped. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.transpose (input, dim0, dim1) + + +

+
+
+
+ Full Usage: + FurnaceImage.transpose (input, dim0, dim1) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim0 + + : + int + - + The first dimension to be transposed. + +
    + + + dim1 + + : + int + - + The second dimension to be transposed. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor that is a transposed version of input. The given dimensions dim0 and dim1 are swapped. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim0 + + : + int +
+
+

+ The first dimension to be transposed. +

+
+
+ + dim1 + + : + int +
+
+

+ The second dimension to be transposed. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.undilate (input, dilations) + + +

+
+
+
+ Full Usage: + FurnaceImage.undilate (input, dilations) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dilations + + : + seq<int> + - + The dilations to use. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Reverse the dilation of the tensor in using the given dilations in each corresponding dimension. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dilations + + : + seq<int> +
+
+

+ The dilations to use. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.unflatten (input, dim, unflattenedShape) + + +

+
+
+
+ Full Usage: + FurnaceImage.unflatten (input, dim, unflattenedShape) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension to unflatten. + +
    + + + unflattenedShape + + : + seq<int> + - + New shape of the unflattened dimension. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Unflattens a tensor dimension by expanding it to the given shape. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension to unflatten. +

+
+
+ + unflattenedShape + + : + seq<int> +
+
+

+ New shape of the unflattened dimension. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.unsqueeze (input, dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.unsqueeze (input, dim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The index at which to insert the singleton dimension. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with a dimension of size one inserted at the specified position +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The index at which to insert the singleton dimension. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.unsqueezeAs (input, other) + + +

+
+
+
+ Full Usage: + FurnaceImage.unsqueezeAs (input, other) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + other + + : + Tensor + - + The other tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with dimensions of size one appended to the end until the number of dimensions is the same as the other tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + other + + : + Tensor +
+
+

+ The other tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.unstack (input, ?dim) + + +

+
+
+
+ Full Usage: + FurnaceImage.unstack (input, ?dim) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?dim + + : + int + - + The dimension to remove. + +
    +
+
+ + Returns: + Tensor[] + +
+
+
+
+
+
+ + + + + + +

+ Removes a tensor dimension +

+
+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?dim + + : + int +
+
+

+ The dimension to remove. +

+
+
+
+
+ + Returns: + + Tensor[] +
+
+
+
+
+ +

+ + + FurnaceImage.var (input, dim, ?keepDim, ?unbiased) + + +

+
+
+
+ Full Usage: + FurnaceImage.var (input, dim, ?keepDim, ?unbiased) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    + + + ?unbiased + + : + bool + - + Whether to use the unbiased estimation or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the variance of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. +

+
+

+ + If keepdim is true, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s). + If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used. + +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+ + ?unbiased + + : + bool +
+
+

+ Whether to use the unbiased estimation or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.var (input, ?unbiased) + + +

+
+
+
+ Full Usage: + FurnaceImage.var (input, ?unbiased) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?unbiased + + : + bool + - + Whether to use the unbiased estimation or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the variance of all elements in the input tensor. +

+
+

+ + If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used. + +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?unbiased + + : + bool +
+
+

+ Whether to use the unbiased estimation or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.version + + +

+
+
+
+ Full Usage: + FurnaceImage.version +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ Returns the version of the Furnace.Core assembly. +

+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + FurnaceImage.view (input, shape) + + +

+
+
+
+ Full Usage: + FurnaceImage.view (input, shape) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + shape + + : + int + - + The desired shape of returned tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the same data as the self tensor but of a different shape. +

+
+

+ The returned tensor shares the same data and must have the same number of elements, but may have a different size. For a tensor to be viewed, the new view size must be compatible with its original size. + The returned tensor shares the same data and must have the same number of elements, but may have a different size. + For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension, + or only span across original dimensions \(d, d+1, \dots, d+k\) that satisfy the following contiguity-like condition that + \(\forall i = d, \dots, d+k-1,\) \[\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]\] + 

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + shape + + : + int +
+
+

+ The desired shape of returned tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.view (input, shape) + + +

+
+
+
+ Full Usage: + FurnaceImage.view (input, shape) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the same data as the self tensor but of a different shape. +

+
+

+ The returned tensor shares the same data and must have the same number of elements, but may have a different size. + For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension, + or only span across original dimensions \(d, d+1, \dots, d+k\) that satisfy the following contiguity-like condition that + \(\forall i = d, \dots, d+k-1,\) \[\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]\] + 

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.viewAs (input, other) + + +

+
+
+
+ Full Usage: + FurnaceImage.viewAs (input, other) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + other + + : + Tensor + - + The result tensor has the same size as other. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ View this tensor as the same size as other. +

+
+

+ The returned tensor shares the same data and must have the same number of elements, but may have a different size. + For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension, + or only span across original dimensions \(d, d+1, \dots, d+k\) that satisfy the following contiguity-like condition that + \(\forall i = d, \dots, d+k-1,\) \[\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]\] + 

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + other + + : + Tensor +
+
+

+ The result tensor has the same size as other. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.zero (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.zero (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Get the scalar zero tensor for the given configuration +

+
+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.zeroCreate count + + +

+
+
+
+ Full Usage: + FurnaceImage.zeroCreate count +
+
+ Parameters: +
    + + + count + + : + int + - + The number of elements in the tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Create a new 1D tensor using '0' as value for each element. +

+
+
+
+ + count + + : + int +
+
+

+ The number of elements in the tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.zeroLike (input, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.zeroLike (input, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the '0' scalar tensor with characteristics based on the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.zeros (length, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.zeros (length, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + - + The length of the returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor filled with '0' values for the given length, element type and configuration +

+
+
+
+ + length + + : + int +
+
+

+ The length of the returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.zeros (shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.zeros (shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor filled with '0' values for the given shape, element type and configuration +

+
+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.zerosLike (input, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.zerosLike (input, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The shape and characteristics of input will determine those of the output tensor. + +
    + + + ?shape + + : + seq<int> + - + The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, the device of the input tensor is used. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor filled with '0' values with characteristics based on the input tensor. +

+
+
+
+ + input + + : + Tensor +
+
+

+ The shape and characteristics of input will determine those of the output tensor. +

+
+
+ + ?shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. Default: If None, the shape of the input tensor is used. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, the device of the input tensor is used. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, the element type of the input tensor is used. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, the backend of the input tensor is used. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-imageextensions.html b/reference/furnace-imageextensions.html new file mode 100644 index 00000000..2a241968 --- /dev/null +++ b/reference/furnace-imageextensions.html @@ -0,0 +1,893 @@ + + + + + ImageExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ImageExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + Tensor.loadImage (fileName, ?normalize, ?resize, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + Tensor.loadImage (fileName, ?normalize, ?resize, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + fileName + + : + string + +
    + + + ?normalize + + : + bool + +
    + + + ?resize + + : + int * int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Load an image file and return it as a tensor +

+
+

+ Extended Type: + Tensor +

+
+
+ + fileName + + : + string +
+
+
+ + ?normalize + + : + bool +
+
+
+ + ?resize + + : + int * int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.loadImage (fileName, ?normalize, ?resize, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + FurnaceImage.loadImage (fileName, ?normalize, ?resize, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + fileName + + : + string + - + The file name of the image to load. + +
    + + + ?normalize + + : + bool + - + If True, shift the image to the range (0, 1). + +
    + + + ?resize + + : + int * int + - + An optional new size for the image. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Load an image file as a tensor. +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + fileName + + : + string +
+
+

+ The file name of the image to load. +

+
+
+ + ?normalize + + : + bool +
+
+

+ If True, shift the image to the range (0, 1). +

+
+
+ + ?resize + + : + int * int +
+
+

+ An optional new size for the image. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.saveImage (fileName, ?pixelMin, ?pixelMax, ?normalize, ?resize, ?gridCols) + + +

+
+
+
+ Full Usage: + this.saveImage (fileName, ?pixelMin, ?pixelMax, ?normalize, ?resize, ?gridCols) +
+
+ Parameters: +
    + + + fileName + + : + string + +
    + + + ?pixelMin + + : + double + +
    + + + ?pixelMax + + : + double + +
    + + + ?normalize + + : + bool + +
    + + + ?resize + + : + int * int + +
    + + + ?gridCols + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Save tensor to an image file using png or jpg format +

+
+

+ Extended Type: + Tensor +

+
+
+ + fileName + + : + string +
+
+
+ + ?pixelMin + + : + double +
+
+
+ + ?pixelMax + + : + double +
+
+
+ + ?normalize + + : + bool +
+
+
+ + ?resize + + : + int * int +
+
+
+ + ?gridCols + + : + int +
+
+
+
+
+ +

+ + + FurnaceImage.saveImage (input, fileName, ?pixelMin, ?pixelMax, ?normalize, ?resize, ?gridCols) + + +

+
+
+
+ Full Usage: + FurnaceImage.saveImage (input, fileName, ?pixelMin, ?pixelMax, ?normalize, ?resize, ?gridCols) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + fileName + + : + string + - + The name of the file to save to. + +
    + + + ?pixelMin + + : + double + - + The minimum pixel value. + +
    + + + ?pixelMax + + : + double + - + The maximum pixel value. + +
    + + + ?normalize + + : + bool + - + If True, shift the image to the range (0, 1), by the min and max values specified by range. + +
    + + + ?resize + + : + int * int + - + An optional new size for the image. + +
    + + + ?gridCols + + : + int + - + Number of columns of images in the grid. + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Save a given Tensor into an image file. +

+
+

+ If the input tensor has 4 dimensions, then make a single image grid. +

+

+ Extended Type: + FurnaceImage +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + fileName + + : + string +
+
+

+ The name of the file to save to. +

+
+
+ + ?pixelMin + + : + double +
+
+

+ The minimum pixel value. +

+
+
+ + ?pixelMax + + : + double +
+
+

+ The maximum pixel value. +

+
+
+ + ?normalize + + : + bool +
+
+

+ If True, shift the image to the range (0, 1), by the min and max values specified by range. +

+
+
+ + ?resize + + : + int * int +
+
+

+ An optional new size for the image. +

+
+
+ + ?gridCols + + : + int +
+
+

+ Number of columns of images in the grid. +

+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-imageutil.html b/reference/furnace-imageutil.html new file mode 100644 index 00000000..c19426a9 --- /dev/null +++ b/reference/furnace-imageutil.html @@ -0,0 +1,370 @@ + + + + + ImageUtil (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ImageUtil Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + loadImage fileName resize + + +

+
+
+
+ Full Usage: + loadImage fileName resize +
+
+ Parameters: +
    + + + fileName + + : + string + +
    + + + resize + + : + (int * int) option + +
    +
+
+ + Returns: + float32[,,] + +
+
+
+
+
+
+ + + + + + +

+ + Loads a pixel array from a file and optionally resizes it in the process. + +

+
+
+
+ + fileName + + : + string +
+
+
+ + resize + + : + (int * int) option +
+
+
+
+
+ + Returns: + + float32[,,] +
+
+
+
+
+ +

+ + + saveImage pixels fileName resize + + +

+
+
+
+ Full Usage: + saveImage pixels fileName resize +
+
+ Parameters: +
    + + + pixels + + : + float32[,,] + +
    + + + fileName + + : + string + +
    + + + resize + + : + (int * int) option + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Saves the given pixel array to a file and optionally resizes it in the process. Supports .png format. + +

+
+
+
+ + pixels + + : + float32[,,] +
+
+
+ + fileName + + : + string +
+
+
+ + resize + + : + (int * int) option +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-batchnorm1d.html b/reference/furnace-model-batchnorm1d.html new file mode 100644 index 00000000..d8165d53 --- /dev/null +++ b/reference/furnace-model-batchnorm1d.html @@ -0,0 +1,652 @@ + + + + + BatchNorm1d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ BatchNorm1d Type +

+ +
+
+

+ Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with optional additional channel dimension) +

+
+

+

+ The mean and standard-deviation are calculated per-dimension over the mini-batches and + \(\gamma\) and \(\beta\) are learnable parameter vectors of size \(C\) (where \(C\) is the + input size). By default, the elements of \(\gamma\) are set to 1 and the elements of + \(\beta\) are set to 0. The standard-deviation is calculated via the biased estimator, + equivalent to FurnaceImage.var(input, unbiased=False). +

+ Also by default, during training this layer keeps running estimates of its computed mean + and variance, which are then used for normalization during evaluation. The running estimates + are kept with a default momentum of 0.1. +

+ If trackRunningStats is set to False, this layer then does not keep running estimates, + and batch statistics are instead used during evaluation time as well. +

+

+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + BatchNorm1d(numFeatures, ?eps, ?momentum, ?affine, ?trackRunningStats, ?reversible) + + +

+
+
+
+ Full Usage: + BatchNorm1d(numFeatures, ?eps, ?momentum, ?affine, ?trackRunningStats, ?reversible) +
+
+ Parameters: +
    + + + numFeatures + + : + int + +
    + + + ?eps + + : + double + +
    + + + ?momentum + + : + Tensor + +
    + + + ?affine + + : + bool + +
    + + + ?trackRunningStats + + : + bool + +
    + + + ?reversible + + : + bool + +
    +
+
+ + Returns: + BatchNorm1d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + numFeatures + + : + int +
+
+
+ + ?eps + + : + double +
+
+
+ + ?momentum + + : + Tensor +
+
+
+ + ?affine + + : + bool +
+
+
+ + ?trackRunningStats + + : + bool +
+
+
+ + ?reversible + + : + bool +
+
+
+
+
+ + Returns: + + BatchNorm1d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mean + + +

+
+
+
+ Full Usage: + this.mean +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.stddev + + +

+
+
+
+ Full Usage: + this.stddev +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.variance + + +

+
+
+
+ Full Usage: + this.variance +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-batchnorm2d.html b/reference/furnace-model-batchnorm2d.html new file mode 100644 index 00000000..64f525c0 --- /dev/null +++ b/reference/furnace-model-batchnorm2d.html @@ -0,0 +1,652 @@ + + + + + BatchNorm2d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ BatchNorm2d Type +

+ +
+
+

+ Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with optional additional channel dimension) +

+
+

+

+ The mean and standard-deviation are calculated per-dimension over the mini-batches and + \(\gamma\) and \(\beta\) are learnable parameter vectors of size \(C\) (where \(C\) is the + input size). By default, the elements of \(\gamma\) are set to 1 and the elements of + \(\beta\) are set to 0. The standard-deviation is calculated via the biased estimator, + equivalent to FurnaceImage.var(input, unbiased=False). +

+ Also by default, during training this layer keeps running estimates of its computed mean + and variance, which are then used for normalization during evaluation. The running estimates + are kept with a default momentum of 0.1. +

+ If trackRunningStats is set to False, this layer then does not keep running estimates, + and batch statistics are instead used during evaluation time as well. +

+

+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + BatchNorm2d(numFeatures, ?eps, ?momentum, ?affine, ?trackRunningStats, ?reversible) + + +

+
+
+
+ Full Usage: + BatchNorm2d(numFeatures, ?eps, ?momentum, ?affine, ?trackRunningStats, ?reversible) +
+
+ Parameters: +
    + + + numFeatures + + : + int + +
    + + + ?eps + + : + double + +
    + + + ?momentum + + : + Tensor + +
    + + + ?affine + + : + bool + +
    + + + ?trackRunningStats + + : + bool + +
    + + + ?reversible + + : + bool + +
    +
+
+ + Returns: + BatchNorm2d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + numFeatures + + : + int +
+
+
+ + ?eps + + : + double +
+
+
+ + ?momentum + + : + Tensor +
+
+
+ + ?affine + + : + bool +
+
+
+ + ?trackRunningStats + + : + bool +
+
+
+ + ?reversible + + : + bool +
+
+
+
+
+ + Returns: + + BatchNorm2d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mean + + +

+
+
+
+ Full Usage: + this.mean +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.stddev + + +

+
+
+
+ Full Usage: + this.stddev +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.variance + + +

+
+
+
+ Full Usage: + this.variance +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-batchnorm3d.html b/reference/furnace-model-batchnorm3d.html new file mode 100644 index 00000000..05e90770 --- /dev/null +++ b/reference/furnace-model-batchnorm3d.html @@ -0,0 +1,652 @@ + + + + + BatchNorm3d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ BatchNorm3d Type +

+ +
+
+

+ Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs with optional additional channel dimension) +

+
+

+

+ The mean and standard-deviation are calculated per-dimension over the mini-batches and + \(\gamma\) and \(\beta\) are learnable parameter vectors of size \(C\) (where \(C\) is the + input size). By default, the elements of \(\gamma\) are set to 1 and the elements of + \(\beta\) are set to 0. The standard-deviation is calculated via the biased estimator, + equivalent to FurnaceImage.var(input, unbiased=False). +

+ Also by default, during training this layer keeps running estimates of its computed mean + and variance, which are then used for normalization during evaluation. The running estimates + are kept with a default momentum of 0.1. +

+ If trackRunningStats is set to False, this layer then does not keep running estimates, + and batch statistics are instead used during evaluation time as well. +

+

+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + BatchNorm3d(numFeatures, ?eps, ?momentum, ?affine, ?trackRunningStats, ?reversible) + + +

+
+
+
+ Full Usage: + BatchNorm3d(numFeatures, ?eps, ?momentum, ?affine, ?trackRunningStats, ?reversible) +
+
+ Parameters: +
    + + + numFeatures + + : + int + +
    + + + ?eps + + : + double + +
    + + + ?momentum + + : + Tensor + +
    + + + ?affine + + : + bool + +
    + + + ?trackRunningStats + + : + bool + +
    + + + ?reversible + + : + bool + +
    +
+
+ + Returns: + BatchNorm3d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + numFeatures + + : + int +
+
+
+ + ?eps + + : + double +
+
+
+ + ?momentum + + : + Tensor +
+
+
+ + ?affine + + : + bool +
+
+
+ + ?trackRunningStats + + : + bool +
+
+
+ + ?reversible + + : + bool +
+
+
+
+
+ + Returns: + + BatchNorm3d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mean + + +

+
+
+
+ Full Usage: + this.mean +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.stddev + + +

+
+
+
+ Full Usage: + this.stddev +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.variance + + +

+
+
+
+ Full Usage: + this.variance +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-conv1d.html b/reference/furnace-model-conv1d.html new file mode 100644 index 00000000..865bb063 --- /dev/null +++ b/reference/furnace-model-conv1d.html @@ -0,0 +1,456 @@ + + + + + Conv1d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Conv1d Type +

+ +
+
+

+ A model that applies a 1D convolution over an input signal composed of several input planes +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Conv1d(inChannels, outChannels, kernelSize, ?stride, ?padding, ?dilation, ?bias) + + +

+
+
+
+ Full Usage: + Conv1d(inChannels, outChannels, kernelSize, ?stride, ?padding, ?dilation, ?bias) +
+
+ Parameters: +
    + + + inChannels + + : + int + +
    + + + outChannels + + : + int + +
    + + + kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?bias + + : + bool + +
    +
+
+ + Returns: + Conv1d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inChannels + + : + int +
+
+
+ + outChannels + + : + int +
+
+
+ + kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?bias + + : + bool +
+
+
+
+
+ + Returns: + + Conv1d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the bias parameter of the model +

+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the weight parameter of the model +

+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-conv2d.html b/reference/furnace-model-conv2d.html new file mode 100644 index 00000000..ddf1d0b6 --- /dev/null +++ b/reference/furnace-model-conv2d.html @@ -0,0 +1,520 @@ + + + + + Conv2d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Conv2d Type +

+ +
+
+

+ A model that applies a 2D convolution over an input signal composed of several input planes +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Conv2d(inChannels, outChannels, ?kernelSize, ?stride, ?padding, ?dilation, ?kernelSizes, ?strides, ?paddings, ?dilations, ?bias) + + +

+
+
+
+ Full Usage: + Conv2d(inChannels, outChannels, ?kernelSize, ?stride, ?padding, ?dilation, ?kernelSizes, ?strides, ?paddings, ?dilations, ?bias) +
+
+ Parameters: +
    + + + inChannels + + : + int + +
    + + + outChannels + + : + int + +
    + + + ?kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?kernelSizes + + : + seq<int> + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?dilations + + : + seq<int> + +
    + + + ?bias + + : + bool + +
    +
+
+ + Returns: + Conv2d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inChannels + + : + int +
+
+
+ + outChannels + + : + int +
+
+
+ + ?kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?kernelSizes + + : + seq<int> +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?dilations + + : + seq<int> +
+
+
+ + ?bias + + : + bool +
+
+
+
+
+ + Returns: + + Conv2d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the bias parameter of the model +

+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the weight parameter of the model +

+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-conv3d.html b/reference/furnace-model-conv3d.html new file mode 100644 index 00000000..6ba1a09c --- /dev/null +++ b/reference/furnace-model-conv3d.html @@ -0,0 +1,520 @@ + + + + + Conv3d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Conv3d Type +

+ +
+
+

+ A model that applies a 3D convolution over an input signal composed of several input planes +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Conv3d(inChannels, outChannels, ?kernelSize, ?stride, ?padding, ?dilation, ?kernelSizes, ?strides, ?paddings, ?dilations, ?bias) + + +

+
+
+
+ Full Usage: + Conv3d(inChannels, outChannels, ?kernelSize, ?stride, ?padding, ?dilation, ?kernelSizes, ?strides, ?paddings, ?dilations, ?bias) +
+
+ Parameters: +
    + + + inChannels + + : + int + +
    + + + outChannels + + : + int + +
    + + + ?kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?kernelSizes + + : + seq<int> + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?dilations + + : + seq<int> + +
    + + + ?bias + + : + bool + +
    +
+
+ + Returns: + Conv3d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inChannels + + : + int +
+
+
+ + outChannels + + : + int +
+
+
+ + ?kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?kernelSizes + + : + seq<int> +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?dilations + + : + seq<int> +
+
+
+ + ?bias + + : + bool +
+
+
+
+
+ + Returns: + + Conv3d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the bias parameter of the model +

+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the weight parameter of the model +

+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-convtranspose1d.html b/reference/furnace-model-convtranspose1d.html new file mode 100644 index 00000000..aef01a04 --- /dev/null +++ b/reference/furnace-model-convtranspose1d.html @@ -0,0 +1,456 @@ + + + + + ConvTranspose1d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ConvTranspose1d Type +

+ +
+
+

+ A model that applies a 1D transposed convolution operator over an input image composed of several input planes. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + ConvTranspose1d(inChannels, outChannels, kernelSize, ?stride, ?padding, ?dilation, ?bias) + + +

+
+
+
+ Full Usage: + ConvTranspose1d(inChannels, outChannels, kernelSize, ?stride, ?padding, ?dilation, ?bias) +
+
+ Parameters: +
    + + + inChannels + + : + int + +
    + + + outChannels + + : + int + +
    + + + kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?bias + + : + bool + +
    +
+
+ + Returns: + ConvTranspose1d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inChannels + + : + int +
+
+
+ + outChannels + + : + int +
+
+
+ + kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?bias + + : + bool +
+
+
+
+
+ + Returns: + + ConvTranspose1d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the bias parameter of the model +

+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the weight parameter of the model +

+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-convtranspose2d.html b/reference/furnace-model-convtranspose2d.html new file mode 100644 index 00000000..62272337 --- /dev/null +++ b/reference/furnace-model-convtranspose2d.html @@ -0,0 +1,520 @@ + + + + + ConvTranspose2d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ConvTranspose2d Type +

+ +
+
+

+ A model that applies a 2D transposed convolution operator over an input image composed of several input planes. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + ConvTranspose2d(inChannels, outChannels, ?kernelSize, ?stride, ?padding, ?dilation, ?kernelSizes, ?strides, ?paddings, ?dilations, ?bias) + + +

+
+
+
+ Full Usage: + ConvTranspose2d(inChannels, outChannels, ?kernelSize, ?stride, ?padding, ?dilation, ?kernelSizes, ?strides, ?paddings, ?dilations, ?bias) +
+
+ Parameters: +
    + + + inChannels + + : + int + +
    + + + outChannels + + : + int + +
    + + + ?kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?kernelSizes + + : + seq<int> + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?dilations + + : + seq<int> + +
    + + + ?bias + + : + bool + +
    +
+
+ + Returns: + ConvTranspose2d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inChannels + + : + int +
+
+
+ + outChannels + + : + int +
+
+
+ + ?kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?kernelSizes + + : + seq<int> +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?dilations + + : + seq<int> +
+
+
+ + ?bias + + : + bool +
+
+
+
+
+ + Returns: + + ConvTranspose2d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the bias parameter of the model +

+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the weight parameter of the model +

+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-convtranspose3d.html b/reference/furnace-model-convtranspose3d.html new file mode 100644 index 00000000..5d5f9f35 --- /dev/null +++ b/reference/furnace-model-convtranspose3d.html @@ -0,0 +1,520 @@ + + + + + ConvTranspose3d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ConvTranspose3d Type +

+ +
+
+

+ A model that applies a 3D transposed convolution operator over an input image composed of several input planes. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + ConvTranspose3d(inChannels, outChannels, ?kernelSize, ?stride, ?padding, ?dilation, ?kernelSizes, ?strides, ?paddings, ?dilations, ?bias) + + +

+
+
+
+ Full Usage: + ConvTranspose3d(inChannels, outChannels, ?kernelSize, ?stride, ?padding, ?dilation, ?kernelSizes, ?strides, ?paddings, ?dilations, ?bias) +
+
+ Parameters: +
    + + + inChannels + + : + int + +
    + + + outChannels + + : + int + +
    + + + ?kernelSize + + : + int + +
    + + + ?stride + + : + int + +
    + + + ?padding + + : + int + +
    + + + ?dilation + + : + int + +
    + + + ?kernelSizes + + : + seq<int> + +
    + + + ?strides + + : + seq<int> + +
    + + + ?paddings + + : + seq<int> + +
    + + + ?dilations + + : + seq<int> + +
    + + + ?bias + + : + bool + +
    +
+
+ + Returns: + ConvTranspose3d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inChannels + + : + int +
+
+
+ + outChannels + + : + int +
+
+
+ + ?kernelSize + + : + int +
+
+
+ + ?stride + + : + int +
+
+
+ + ?padding + + : + int +
+
+
+ + ?dilation + + : + int +
+
+
+ + ?kernelSizes + + : + seq<int> +
+
+
+ + ?strides + + : + seq<int> +
+
+
+ + ?paddings + + : + seq<int> +
+
+
+ + ?dilations + + : + seq<int> +
+
+
+ + ?bias + + : + bool +
+
+
+
+
+ + Returns: + + ConvTranspose3d +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the bias parameter of the model +

+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the weight parameter of the model +

+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-dropout.html b/reference/furnace-model-dropout.html new file mode 100644 index 00000000..6868709d --- /dev/null +++ b/reference/furnace-model-dropout.html @@ -0,0 +1,258 @@ + + + + + Dropout (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Dropout Type +

+ +
+
+

+ A model which during training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Dropout(?p) + + +

+
+
+
+ Full Usage: + Dropout(?p) +
+
+ Parameters: +
    + + + ?p + + : + double + +
    +
+
+ + Returns: + Dropout + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?p + + : + double +
+
+
+
+
+ + Returns: + + Dropout +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-dropout2d.html b/reference/furnace-model-dropout2d.html new file mode 100644 index 00000000..a05234be --- /dev/null +++ b/reference/furnace-model-dropout2d.html @@ -0,0 +1,258 @@ + + + + + Dropout2d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Dropout2d Type +

+ +
+
+

+ A model which during training, randomly zero out entire channels. Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Dropout2d(?p) + + +

+
+
+
+ Full Usage: + Dropout2d(?p) +
+
+ Parameters: +
    + + + ?p + + : + double + +
    +
+
+ + Returns: + Dropout2d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?p + + : + double +
+
+
+
+
+ + Returns: + + Dropout2d +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-dropout3d.html b/reference/furnace-model-dropout3d.html new file mode 100644 index 00000000..8f32972e --- /dev/null +++ b/reference/furnace-model-dropout3d.html @@ -0,0 +1,258 @@ + + + + + Dropout3d (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Dropout3d Type +

+ +
+
+

+ A model which during training, randomly zero out entire channels. Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Dropout3d(?p) + + +

+
+
+
+ Full Usage: + Dropout3d(?p) +
+
+ Parameters: +
    + + + ?p + + : + double + +
    +
+
+ + Returns: + Dropout3d + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?p + + : + double +
+
+
+
+
+ + Returns: + + Dropout3d +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-linear.html b/reference/furnace-model-linear.html new file mode 100644 index 00000000..62bc18ef --- /dev/null +++ b/reference/furnace-model-linear.html @@ -0,0 +1,392 @@ + + + + + Linear (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Linear Type +

+ +
+
+

+ A model that applies a linear transformation to the incoming data: \(y = xA^T + b\) +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Linear(inFeatures, outFeatures, ?bias) + + +

+
+
+
+ Full Usage: + Linear(inFeatures, outFeatures, ?bias) +
+
+ Parameters: +
    + + + inFeatures + + : + int + +
    + + + outFeatures + + : + int + +
    + + + ?bias + + : + bool + +
    +
+
+ + Returns: + Linear + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inFeatures + + : + int +
+
+
+ + outFeatures + + : + int +
+
+
+ + ?bias + + : + bool +
+
+
+
+
+ + Returns: + + Linear +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.bias + + +

+
+
+
+ Full Usage: + this.bias +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the bias parameter of the model +

+
+
+
+ +

+ + + this.weight + + +

+
+
+
+ Full Usage: + this.weight +
+
+
+
+
+
+
+ + + + + + +

+ Get or set the weight parameter of the model +

+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-lstm.html b/reference/furnace-model-lstm.html new file mode 100644 index 00000000..c4be82e4 --- /dev/null +++ b/reference/furnace-model-lstm.html @@ -0,0 +1,672 @@ + + + + + LSTM (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ LSTM Type +

+ +
+
+

+ Long short-term memory (LSTM) recurrent neural network. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + LSTM(inputSize, hiddenSize, ?numLayers, ?bias, ?batchFirst, ?dropout, ?bidirectional) + + +

+
+
+
+ Full Usage: + LSTM(inputSize, hiddenSize, ?numLayers, ?bias, ?batchFirst, ?dropout, ?bidirectional) +
+
+ Parameters: +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    + + + ?numLayers + + : + int + +
    + + + ?bias + + : + bool + +
    + + + ?batchFirst + + : + bool + +
    + + + ?dropout + + : + float + +
    + + + ?bidirectional + + : + bool + +
    +
+
+ + Returns: + LSTM + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+ + ?numLayers + + : + int +
+
+
+ + ?bias + + : + bool +
+
+
+ + ?batchFirst + + : + bool +
+
+
+ + ?dropout + + : + float +
+
+
+ + ?bidirectional + + : + bool +
+
+
+
+
+ + Returns: + + LSTM +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.forwardWithHidden (input, hidden, cell) + + +

+
+
+
+ Full Usage: + this.forwardWithHidden (input, hidden, cell) +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + cell + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + cell + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + this.hiddenSize + + +

+
+
+
+ Full Usage: + this.hiddenSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.inputSize + + +

+
+
+
+ Full Usage: + this.inputSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.newHidden batchSize + + +

+
+
+
+ Full Usage: + this.newHidden batchSize +
+
+ Parameters: +
    + + + batchSize + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + batchSize + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-lstmcell.html b/reference/furnace-model-lstmcell.html new file mode 100644 index 00000000..a0288de6 --- /dev/null +++ b/reference/furnace-model-lstmcell.html @@ -0,0 +1,812 @@ + + + + + LSTMCell (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ LSTMCell Type +

+ +
+
+

+ Unit cell of a long short-term memory (LSTM) recurrent neural network. Prefer using the RNN class instead, which can combine RNNCells in multiple layers. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + LSTMCell(inputSize, hiddenSize, ?bias, ?checkShapes) + + +

+
+
+
+ Full Usage: + LSTMCell(inputSize, hiddenSize, ?bias, ?checkShapes) +
+
+ Parameters: +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    + + + ?bias + + : + bool + +
    + + + ?checkShapes + + : + bool + +
    +
+
+ + Returns: + LSTMCell + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+ + ?bias + + : + bool +
+
+
+ + ?checkShapes + + : + bool +
+
+
+
+
+ + Returns: + + LSTMCell +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.forwardSequence input + + +

+
+
+
+ Full Usage: + this.forwardSequence input +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.forwardSequenceWithHidden (input, hidden, cell) + + +

+
+
+
+ Full Usage: + this.forwardSequenceWithHidden (input, hidden, cell) +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + cell + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + cell + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.forwardWithHidden (input, hidden, cell) + + +

+
+
+
+ Full Usage: + this.forwardWithHidden (input, hidden, cell) +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + cell + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + cell + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.hiddenSize + + +

+
+
+
+ Full Usage: + this.hiddenSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.inputSize + + +

+
+
+
+ Full Usage: + this.inputSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.newHidden batchSize + + +

+
+
+
+ Full Usage: + this.newHidden batchSize +
+
+ Parameters: +
    + + + batchSize + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + batchSize + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-mode.html b/reference/furnace-model-mode.html new file mode 100644 index 00000000..6d8cd9f6 --- /dev/null +++ b/reference/furnace-model-mode.html @@ -0,0 +1,296 @@ + + + + + Mode (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Mode Type +

+ +
+
+

+ Indicates the training or evaluation mode for a model. +

+
+
+
+
+
+
+
+

+ Record fields +

+ + + + + + + + + + + + + + + + + +
+ Record Field + + Description +
+
+ +

+ + + Eval + + +

+
+
+
+ Full Usage: + Eval +
+
+ + Field type: + Mode + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + Mode +
+
+
+
+
+ +

+ + + Train + + +

+
+
+
+ Full Usage: + Train +
+
+ + Field type: + Mode + +
+ Modifiers: + static +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + Mode +
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-model-2.html b/reference/furnace-model-model-2.html new file mode 100644 index 00000000..8370d4ae --- /dev/null +++ b/reference/furnace-model-model-2.html @@ -0,0 +1,1049 @@ + + + + + Model<'In, 'Out> (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Model<'In, 'Out> Type +

+ +
+
+

+ Represents a model, primarily a collection of named parameters and sub-models and a function governed by them. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Model(?f, ?parameters, ?buffers, ?models) + + +

+
+
+
+ Full Usage: + Model(?f, ?parameters, ?buffers, ?models) +
+
+ Parameters: +
    + + + ?f + + : + 'In -> 'Out + +
    + + + ?parameters + + : + seq<Parameter> + +
    + + + ?buffers + + : + seq<Parameter> + +
    + + + ?models + + : + seq<ModelBase> + +
    +
+
+ + Returns: + Model<'In, 'Out> + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?f + + : + 'In -> 'Out +
+
+
+ + ?parameters + + : + seq<Parameter> +
+
+
+ + ?buffers + + : + seq<Parameter> +
+
+
+ + ?models + + : + seq<ModelBase> +
+
+
+
+
+ + Returns: + + Model<'In, 'Out> +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.asFunction parameters input + + +

+
+
+
+ Full Usage: + this.asFunction parameters input +
+
+ Parameters: +
    + + + parameters + + : + Tensor + +
    + + + input + + : + 'In + +
    +
+
+ + Returns: + 'Out + +
+
+
+
+
+
+ + + + + + +

+ Use the model as a function of its parameters and input. +

+
+

+ + The resulting function can be composed with a loss function and differentiated. + During execution the parameters of the model are temporarily set to the supplied parameters. + +

+
+
+ + parameters + + : + Tensor +
+
+
+ + input + + : + 'In +
+
+
+
+
+ + Returns: + + 'Out +
+
+
+
+
+ +

+ + + this.clone () + + +

+
+
+
+ Full Usage: + this.clone () +
+
+ + Returns: + Model<'In, 'Out> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Model<'In, 'Out> +
+
+
+
+
+ +

+ + + this.forward arg1 + + +

+
+
+
+ Full Usage: + this.forward arg1 +
+
+ Parameters: +
    + + + arg0 + + : + 'In + +
    +
+
+ + Returns: + 'Out + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + arg0 + + : + 'In +
+
+
+
+
+ + Returns: + + 'Out +
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + t --> model + + +

+
+
+
+ Full Usage: + t --> model +
+
+ Parameters: +
    + + + t + + : + 'In + +
    + + + model + + : + Model<'In, 'Out> + +
    +
+
+ + Returns: + 'Out + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + t + + : + 'In +
+
+
+ + model + + : + Model<'In, 'Out> +
+
+
+
+
+ + Returns: + + 'Out +
+
+
+
+
+ +

+ + + f --> model + + +

+
+
+
+ Full Usage: + f --> model +
+
+ Parameters: +
    + + + f + + : + 'In -> 'Out + +
    + + + model + + : + Model<'Out, 'Out2> + +
    +
+
+ + Returns: + Model<'In, 'Out2> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + 'In -> 'Out +
+
+
+ + model + + : + Model<'Out, 'Out2> +
+
+
+
+
+ + Returns: + + Model<'In, 'Out2> +
+
+
+
+
+ +

+ + + model --> f + + +

+
+
+
+ Full Usage: + model --> f +
+
+ Parameters: +
    + + + model + + : + Model<'In, 'Out> + +
    + + + f + + : + 'Out -> 'Out2 + +
    +
+
+ + Returns: + Model<'In, 'Out2> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + model + + : + Model<'In, 'Out> +
+
+
+ + f + + : + 'Out -> 'Out2 +
+
+
+
+
+ + Returns: + + Model<'In, 'Out2> +
+
+
+
+
+ +

+ + + model1 --> model2 + + +

+
+
+
+ Full Usage: + model1 --> model2 +
+
+ Parameters: +
    + + + model1 + + : + Model<'In, 'Out> + +
    + + + model2 + + : + Model<'Out, 'Out2> + +
    +
+
+ + Returns: + Model<'In, 'Out2> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + model1 + + : + Model<'In, 'Out> +
+
+
+ + model2 + + : + Model<'Out, 'Out2> +
+
+
+
+
+ + Returns: + + Model<'In, 'Out2> +
+
+
+
+
+ +

+ + + Model.compose model1 model2 + + +

+
+
+
+ Full Usage: + Model.compose model1 model2 +
+
+ Parameters: +
    + + + model1 + + : + Model<'In, 'Out> + +
    + + + model2 + + : + Model<'Out, 'Out2> + +
    +
+
+ + Returns: + Model<'In, 'Out2> + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + model1 + + : + Model<'In, 'Out> +
+
+
+ + model2 + + : + Model<'Out, 'Out2> +
+
+
+
+
+ + Returns: + + Model<'In, 'Out2> +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-model.html b/reference/furnace-model-model.html new file mode 100644 index 00000000..c7b52618 --- /dev/null +++ b/reference/furnace-model-model.html @@ -0,0 +1,157 @@ + + + + + Model (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Model Type +

+ +
+
+

+ +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-modelbase.html b/reference/furnace-model-modelbase.html new file mode 100644 index 00000000..1f51e77a --- /dev/null +++ b/reference/furnace-model-modelbase.html @@ -0,0 +1,2766 @@ + + + + + ModelBase (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ModelBase Type +

+ +
+
+

+ Represents the base class of all models. +

+
+
+
+
+
+
+
+

+ Record fields +

+ + + + + + + + + + + + + +
+ Record Field + + Description +
+
+ +

+ + + mode + + +

+
+
+
+ Full Usage: + mode +
+
+ + Field type: + Mode + +
+ Modifiers: + mutable +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + Mode +
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + ModelBase() + + +

+
+
+
+ Full Usage: + ModelBase() +
+
+ + Returns: + ModelBase + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + ModelBase +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.addBuffer (buffer, name) + + +

+
+
+
+ Full Usage: + this.addBuffer (buffer, name) +
+
+ Parameters: +
    + + + buffer + + : + Parameter + +
    + + + name + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + buffer + + : + Parameter +
+
+
+ + name + + : + string +
+
+
+
+
+ +

+ + + this.addBuffer buffers + + +

+
+
+
+ Full Usage: + this.addBuffer buffers +
+
+ Parameters: +
    + + + buffers + + : + (Parameter * string)[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + buffers + + : + (Parameter * string)[] +
+
+
+
+
+ +

+ + + this.addBuffer buffers + + +

+
+
+
+ Full Usage: + this.addBuffer buffers +
+
+ Parameters: + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + buffers + + : + Parameter[] +
+
+
+
+
+ +

+ + + this.addModel (model, name) + + +

+
+
+
+ Full Usage: + this.addModel (model, name) +
+
+ Parameters: +
    + + + model + + : + Model + +
    + + + name + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + model + + : + Model +
+
+
+ + name + + : + string +
+
+
+
+
+ +

+ + + this.addModel models + + +

+
+
+
+ Full Usage: + this.addModel models +
+
+ Parameters: +
    + + + models + + : + (Model * string)[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + models + + : + (Model * string)[] +
+
+
+
+
+ +

+ + + this.addModel models + + +

+
+
+
+ Full Usage: + this.addModel models +
+
+ Parameters: +
    + + + models + + : + Model[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + models + + : + Model[] +
+
+
+
+
+ +

+ + + this.addModel (model, name) + + +

+
+
+
+ Full Usage: + this.addModel (model, name) +
+
+ Parameters: +
    + + + model + + : + ModelBase + +
    + + + name + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + model + + : + ModelBase +
+
+
+ + name + + : + string +
+
+
+
+
+ +

+ + + this.addModel models + + +

+
+
+
+ Full Usage: + this.addModel models +
+
+ Parameters: +
    + + + models + + : + (ModelBase * string)[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + models + + : + (ModelBase * string)[] +
+
+
+
+
+ +

+ + + this.addModel models + + +

+
+
+
+ Full Usage: + this.addModel models +
+
+ Parameters: + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + models + + : + ModelBase[] +
+
+
+
+
+ +

+ + + this.addParameter (parameter, name) + + +

+
+
+
+ Full Usage: + this.addParameter (parameter, name) +
+
+ Parameters: +
    + + + parameter + + : + Parameter + +
    + + + name + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + parameter + + : + Parameter +
+
+
+ + name + + : + string +
+
+
+
+
+ +

+ + + this.addParameter parameters + + +

+
+
+
+ Full Usage: + this.addParameter parameters +
+
+ Parameters: +
    + + + parameters + + : + (Parameter * string)[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + parameters + + : + (Parameter * string)[] +
+
+
+
+
+ +

+ + + this.addParameter parameters + + +

+
+
+
+ Full Usage: + this.addParameter parameters +
+
+ Parameters: + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + parameters + + : + Parameter[] +
+
+
+
+
+ +

+ + + this.backend + + +

+
+
+
+ Full Usage: + this.backend +
+
+ + Returns: + Backend + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Backend +
+
+
+
+
+ +

+ + + this.buffers + + +

+
+
+
+ Full Usage: + this.buffers +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.buffersVector + + +

+
+
+
+ Full Usage: + this.buffersVector +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.children + + +

+
+
+
+ Full Usage: + this.children +
+
+ + Returns: + ModelBase list + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + ModelBase list +
+
+
+
+
+ +

+ + + this.clone () + + +

+
+
+
+ Full Usage: + this.clone () +
+
+ + Returns: + ModelBase + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + ModelBase +
+
+
+
+
+ +

+ + + this.descendants + + +

+
+
+
+ Full Usage: + this.descendants +
+
+ + Returns: + ModelBase list + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + ModelBase list +
+
+
+
+
+ +

+ + + this.device + + +

+
+
+
+ Full Usage: + this.device +
+
+ + Returns: + Device + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Device +
+
+
+
+
+ +

+ + + this.dtype + + +

+
+
+
+ Full Usage: + this.dtype +
+
+ + Returns: + Dtype + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+ +

+ + + this.eval () + + +

+
+
+
+ Full Usage: + this.eval () +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.forwardDiff (derivatives, ?nestingTag) + + +

+
+
+
+ Full Usage: + this.forwardDiff (derivatives, ?nestingTag) +
+
+ Parameters: +
    + + + derivatives + + : + ParameterDict + - + The derivatives of the parameters + +
    + + + ?nestingTag + + : + uint32 + - + The level tag for nested differentiation. Defaults to the current global nesting level + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Adjust the parameters of the model to initiate a new level of forward-mode automatic differentiation. + +

+
+

+ + After this call the current parameters of the model will have attached derivatives for forward mode differentiation. + +

+
+
+ + derivatives + + : + ParameterDict +
+
+

+ The derivatives of the parameters +

+
+
+ + ?nestingTag + + : + uint32 +
+
+

+ The level tag for nested differentiation. Defaults to the current global nesting level +

+
+
+
+
+ +

+ + + this.hasOwnBuffers + + +

+
+
+
+ Full Usage: + this.hasOwnBuffers +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.hasOwnParameters + + +

+
+
+
+ Full Usage: + this.hasOwnParameters +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.hasOwnState + + +

+
+
+
+ Full Usage: + this.hasOwnState +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.init f + + +

+
+
+
+ Full Usage: + this.init f +
+
+ Parameters: + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + string * Tensor -> Tensor +
+
+
+
+
+ +

+ + + this.isForwardDiff + + +

+
+
+
+ Full Usage: + this.isForwardDiff +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.isNoDiff + + +

+
+
+
+ Full Usage: + this.isNoDiff +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.isReverseDiff + + +

+
+
+
+ Full Usage: + this.isReverseDiff +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.move (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.move (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Moves the state (parameters and buffers) of the model to the given configuration +

+
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ +

+ + + this.nbuffers + + +

+
+
+
+ Full Usage: + this.nbuffers +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.noDiff () + + +

+
+
+
+ Full Usage: + this.noDiff () +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.nparameters + + +

+
+
+
+ Full Usage: + this.nparameters +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ Gets the number of parameters of the Model +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.nstate + + +

+
+
+
+ Full Usage: + this.nstate +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.parameters + + +

+
+
+
+ Full Usage: + this.parameters +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.parametersVector + + +

+
+
+
+ Full Usage: + this.parametersVector +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.reverseDiff ?nestingTag + + +

+
+
+
+ Full Usage: + this.reverseDiff ?nestingTag +
+
+ Parameters: +
    + + + ?nestingTag + + : + uint32 + - + The level tag for nested differentiation. Defaults to the current global nesting level + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Adjust the parameters of the model to initiate a new level of reverse-mode automatic differentiation. + +

+
+

+ + After this call the current parameters of the model will support reverse-mode differentiation. After the completion + of the corresponding reverse operation, the computed derivatives will be available. + +

+
+
+ + ?nestingTag + + : + uint32 +
+
+

+ The level tag for nested differentiation. Defaults to the current global nesting level +

+
+
+
+
+ +

+ + + this.state + + +

+
+
+
+ Full Usage: + this.state +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.stateVector + + +

+
+
+
+ Full Usage: + this.stateVector +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.summary () + + +

+
+
+
+ Full Usage: + this.summary () +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + this.train () + + +

+
+
+
+ Full Usage: + this.train () +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-parameter.html b/reference/furnace-model-parameter.html new file mode 100644 index 00000000..72e47bed --- /dev/null +++ b/reference/furnace-model-parameter.html @@ -0,0 +1,686 @@ + + + + + Parameter (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Parameter Type +

+ +
+
+

+ Represents a parameter. +

+
+

+ A parameter is a mutable register holding a tensor. +

+
+
+
+
+
+
+

+ Record fields +

+ + + + + + + + + + + + + +
+ Record Field + + Description +
+
+ +

+ + + value + + +

+
+
+
+ Full Usage: + value +
+
+ + Field type: + Tensor + +
+ Modifiers: + mutable +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + Tensor +
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Parameter(value) + + +

+
+
+
+ Full Usage: + Parameter(value) +
+
+ Parameters: +
    + + + value + + : + Tensor + +
    +
+
+ + Returns: + Parameter + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + Tensor +
+
+
+
+
+ + Returns: + + Parameter +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.copy () + + +

+
+
+
+ Full Usage: + this.copy () +
+
+ + Returns: + Parameter + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Parameter +
+
+
+
+
+ +

+ + + this.forwardDiff (derivative, ?nestingTag) + + +

+
+
+
+ Full Usage: + this.forwardDiff (derivative, ?nestingTag) +
+
+ Parameters: +
    + + + derivative + + : + Tensor + +
    + + + ?nestingTag + + : + uint32 + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + derivative + + : + Tensor +
+
+
+ + ?nestingTag + + : + uint32 +
+
+
+
+
+ +

+ + + this.move (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.move (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ +

+ + + this.noDiff () + + +

+
+
+
+ Full Usage: + this.noDiff () +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.reverseDiff ?nestingTag + + +

+
+
+
+ Full Usage: + this.reverseDiff ?nestingTag +
+
+ Parameters: +
    + + + ?nestingTag + + : + uint32 + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + ?nestingTag + + : + uint32 +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-parameterdict.html b/reference/furnace-model-parameterdict.html new file mode 100644 index 00000000..21e19c0b --- /dev/null +++ b/reference/furnace-model-parameterdict.html @@ -0,0 +1,1775 @@ + + + + + ParameterDict (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ParameterDict Type +

+ +
+
+

+ Represents a collection of named parameters. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + ParameterDict() + + +

+
+
+
+ Full Usage: + ParameterDict() +
+
+ + Returns: + ParameterDict + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + ParameterDict +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.[key] + + +

+
+
+
+ Full Usage: + this.[key] +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + this.add parameters + + +

+
+
+
+ Full Usage: + this.add parameters +
+
+ Parameters: + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + parameters + + : + ParameterDict +
+
+
+
+
+ +

+ + + this.add parameters + + +

+
+
+
+ Full Usage: + this.add parameters +
+
+ Parameters: +
    + + + parameters + + : + (string * Parameter) list + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + parameters + + : + (string * Parameter) list +
+
+
+
+
+ +

+ + + this.add (name, parameter) + + +

+
+
+
+ Full Usage: + this.add (name, parameter) +
+
+ Parameters: +
    + + + name + + : + 'a + +
    + + + parameter + + : + Parameter + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + name + + : + 'a +
+
+
+ + parameter + + : + Parameter +
+
+
+
+
+ +

+ + + this.backend + + +

+
+
+
+ Full Usage: + this.backend +
+
+ + Returns: + Backend + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Backend +
+
+
+
+
+ +

+ + + this.clear () + + +

+
+
+
+ Full Usage: + this.clear () +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.copy () + + +

+
+
+
+ Full Usage: + this.copy () +
+
+ + Returns: + ParameterDict + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ + This method discards differentiability and returns a ParameterDict containing parameters that are constant tensors. + +

+
+
+ + Returns: + + ParameterDict +
+
+
+
+
+ +

+ + + this.count + + +

+
+
+
+ Full Usage: + this.count +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.device + + +

+
+
+
+ Full Usage: + this.device +
+
+ + Returns: + Device + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Device +
+
+
+
+
+ +

+ + + this.dtype + + +

+
+
+
+ Full Usage: + this.dtype +
+
+ + Returns: + Dtype + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+ +

+ + + this.flatten ?differentiable + + +

+
+
+
+ Full Usage: + this.flatten ?differentiable +
+
+ Parameters: +
    + + + ?differentiable + + : + bool + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + ?differentiable + + : + bool +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.forwardDiff (derivatives, ?nestingTag) + + +

+
+
+
+ Full Usage: + this.forwardDiff (derivatives, ?nestingTag) +
+
+ Parameters: +
    + + + derivatives + + : + ParameterDict + - + The derivatives of the parameters + +
    + + + ?nestingTag + + : + uint32 + - + The level tag for nested differentiation. Defaults to the current global nesting level + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Adjust the parameters to include support for forward-mode automatic differentiation. + +

+
+

+ + After this call the current parameters in this dictionary will have attached derivatives for forward mode differentiation. + +

+
+
+ + derivatives + + : + ParameterDict +
+
+

+ The derivatives of the parameters +

+
+
+ + ?nestingTag + + : + uint32 +
+
+

+ The level tag for nested differentiation. Defaults to the current global nesting level +

+
+
+
+
+ +

+ + + this.iter f + + +

+
+
+
+ Full Usage: + this.iter f +
+
+ Parameters: +
    + + + f + + : + string * Parameter -> unit + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + string * Parameter -> unit +
+
+
+
+
+ +

+ + + this.map f + + +

+
+
+
+ Full Usage: + this.map f +
+
+ Parameters: + +
+ + Returns: + ParameterDict + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Parameter -> Parameter +
+
+
+
+
+ + Returns: + + ParameterDict +
+
+
+
+
+ +

+ + + this.map f + + +

+
+
+
+ Full Usage: + this.map f +
+
+ Parameters: + +
+ + Returns: + ParameterDict + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + string * Parameter -> string * Parameter +
+
+
+
+
+ + Returns: + + ParameterDict +
+
+
+
+
+ +

+ + + this.move (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.move (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ +

+ + + this.nelement + + +

+
+
+
+ Full Usage: + this.nelement +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.noDiff () + + +

+
+
+
+ Full Usage: + this.noDiff () +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.reverseDiff ?nestingTag + + +

+
+
+
+ Full Usage: + this.reverseDiff ?nestingTag +
+
+ Parameters: +
    + + + ?nestingTag + + : + uint32 + - + The level tag for nested differentiation. Defaults to the current global nesting level + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Adjust the parameters to include support for reverse-mode automatic differentiation. + +

+
+

+ + After this call the current parameters in this dictionary will support reverse-mode differentiation. After the completion + of the corresponding reverse operation, the computed derivative + will be available. + +

+
+
+ + ?nestingTag + + : + uint32 +
+
+

+ The level tag for nested differentiation. Defaults to the current global nesting level +

+
+
+
+
+ +

+ + + this.set (other, ?differentiable, ?strict) + + +

+
+
+
+ Full Usage: + this.set (other, ?differentiable, ?strict) +
+
+ Parameters: +
    + + + other + + : + ParameterDict + +
    + + + ?differentiable + + : + bool + +
    + + + ?strict + + : + bool + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + other + + : + ParameterDict +
+
+
+ + ?differentiable + + : + bool +
+
+
+ + ?strict + + : + bool +
+
+
+
+
+ +

+ + + this.unflatten (tensors, ?differentiable) + + +

+
+
+
+ Full Usage: + this.unflatten (tensors, ?differentiable) +
+
+ Parameters: +
    + + + tensors + + : + Tensor + +
    + + + ?differentiable + + : + bool + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + tensors + + : + Tensor +
+
+
+ + ?differentiable + + : + bool +
+
+
+
+
+ +

+ + + this.unflattenToNew tensors + + +

+
+
+
+ Full Usage: + this.unflattenToNew tensors +
+
+ Parameters: +
    + + + tensors + + : + Tensor + +
    +
+
+ + Returns: + ParameterDict + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + tensors + + : + Tensor +
+
+
+
+
+ + Returns: + + ParameterDict +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-recurrentshape.html b/reference/furnace-model-recurrentshape.html new file mode 100644 index 00000000..faed2742 --- /dev/null +++ b/reference/furnace-model-recurrentshape.html @@ -0,0 +1,1248 @@ + + + + + RecurrentShape (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ RecurrentShape Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + LSTMCellSequenceWithHidden input hidden cell inputSize hiddenSize + + +

+
+
+
+ Full Usage: + LSTMCellSequenceWithHidden input hidden cell inputSize hiddenSize +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + cell + + : + Tensor + +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + cell + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+
+
+ +

+ + + LSTMCellWithHidden input hidden cell inputSize hiddenSize + + +

+
+
+
+ Full Usage: + LSTMCellWithHidden input hidden cell inputSize hiddenSize +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + cell + + : + Tensor + +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + cell + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+
+
+ +

+ + + LSTMWithHidden input hidden cell inputSize hiddenSize batchFirst numLayers numDirections + + +

+
+
+
+ Full Usage: + LSTMWithHidden input hidden cell inputSize hiddenSize batchFirst numLayers numDirections +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + cell + + : + Tensor + +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    + + + batchFirst + + : + bool + +
    + + + numLayers + + : + int + +
    + + + numDirections + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + cell + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+ + batchFirst + + : + bool +
+
+
+ + numLayers + + : + int +
+
+
+ + numDirections + + : + int +
+
+
+
+
+ +

+ + + RNN input inputSize batchFirst + + +

+
+
+
+ Full Usage: + RNN input inputSize batchFirst +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + inputSize + + : + int + +
    + + + batchFirst + + : + bool + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+ + batchFirst + + : + bool +
+
+
+
+
+ +

+ + + RNNCell input inputSize + + +

+
+
+
+ Full Usage: + RNNCell input inputSize +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + inputSize + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+
+
+ +

+ + + RNNCellSequence input inputSize + + +

+
+
+
+ Full Usage: + RNNCellSequence input inputSize +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + inputSize + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+
+
+ +

+ + + RNNCellSequenceWithHidden input hidden inputSize hiddenSize + + +

+
+
+
+ Full Usage: + RNNCellSequenceWithHidden input hidden inputSize hiddenSize +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+
+
+ +

+ + + RNNCellWithHidden input hidden inputSize hiddenSize + + +

+
+
+
+ Full Usage: + RNNCellWithHidden input hidden inputSize hiddenSize +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+
+
+ +

+ + + RNNWithHidden input hidden inputSize hiddenSize batchFirst numLayers numDirections + + +

+
+
+
+ Full Usage: + RNNWithHidden input hidden inputSize hiddenSize batchFirst numLayers numDirections +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    + + + batchFirst + + : + bool + +
    + + + numLayers + + : + int + +
    + + + numDirections + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+ + batchFirst + + : + bool +
+
+
+ + numLayers + + : + int +
+
+
+ + numDirections + + : + int +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-rnn.html b/reference/furnace-model-rnn.html new file mode 100644 index 00000000..22027c4c --- /dev/null +++ b/reference/furnace-model-rnn.html @@ -0,0 +1,672 @@ + + + + + RNN (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ RNN Type +

+ +
+
+

+ Recurrent neural network. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + RNN(inputSize, hiddenSize, ?numLayers, ?nonlinearity, ?bias, ?batchFirst, ?dropout, ?bidirectional) + + +

+
+
+
+ Full Usage: + RNN(inputSize, hiddenSize, ?numLayers, ?nonlinearity, ?bias, ?batchFirst, ?dropout, ?bidirectional) +
+
+ Parameters: +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    + + + ?numLayers + + : + int + +
    + + + ?nonlinearity + + : + Tensor -> Tensor + +
    + + + ?bias + + : + bool + +
    + + + ?batchFirst + + : + bool + +
    + + + ?dropout + + : + float + +
    + + + ?bidirectional + + : + bool + +
    +
+
+ + Returns: + RNN + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+ + ?numLayers + + : + int +
+
+
+ + ?nonlinearity + + : + Tensor -> Tensor +
+
+
+ + ?bias + + : + bool +
+
+
+ + ?batchFirst + + : + bool +
+
+
+ + ?dropout + + : + float +
+
+
+ + ?bidirectional + + : + bool +
+
+
+
+
+ + Returns: + + RNN +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.forwardWithHidden (input, hidden) + + +

+
+
+
+ Full Usage: + this.forwardWithHidden (input, hidden) +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.hiddenSize + + +

+
+
+
+ Full Usage: + this.hiddenSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.inputSize + + +

+
+
+
+ Full Usage: + this.inputSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.newHidden batchSize + + +

+
+
+
+ Full Usage: + this.newHidden batchSize +
+
+ Parameters: +
    + + + batchSize + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + batchSize + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-rnncell.html b/reference/furnace-model-rnncell.html new file mode 100644 index 00000000..83deb3f2 --- /dev/null +++ b/reference/furnace-model-rnncell.html @@ -0,0 +1,796 @@ + + + + + RNNCell (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ RNNCell Type +

+ +
+
+

+ Unit cell of a recurrent neural network. Prefer using the RNN class instead, which can combine RNNCells in multiple layers. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + RNNCell(inputSize, hiddenSize, ?nonlinearity, ?bias, ?checkShapes) + + +

+
+
+
+ Full Usage: + RNNCell(inputSize, hiddenSize, ?nonlinearity, ?bias, ?checkShapes) +
+
+ Parameters: +
    + + + inputSize + + : + int + +
    + + + hiddenSize + + : + int + +
    + + + ?nonlinearity + + : + Tensor -> Tensor + +
    + + + ?bias + + : + bool + +
    + + + ?checkShapes + + : + bool + +
    +
+
+ + Returns: + RNNCell + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + inputSize + + : + int +
+
+
+ + hiddenSize + + : + int +
+
+
+ + ?nonlinearity + + : + Tensor -> Tensor +
+
+
+ + ?bias + + : + bool +
+
+
+ + ?checkShapes + + : + bool +
+
+
+
+
+ + Returns: + + RNNCell +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.forwardSequence input + + +

+
+
+
+ Full Usage: + this.forwardSequence input +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.forwardSequenceWithHidden (input, hidden) + + +

+
+
+
+ Full Usage: + this.forwardSequenceWithHidden (input, hidden) +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.forwardWithHidden (input, hidden) + + +

+
+
+
+ Full Usage: + this.forwardWithHidden (input, hidden) +
+
+ Parameters: +
    + + + input + + : + Tensor + +
    + + + hidden + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + input + + : + Tensor +
+
+
+ + hidden + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.hiddenSize + + +

+
+
+
+ Full Usage: + this.hiddenSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.inputSize + + +

+
+
+
+ Full Usage: + this.inputSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.newHidden batchSize + + +

+
+
+
+ Full Usage: + this.newHidden batchSize +
+
+ Parameters: +
    + + + batchSize + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + batchSize + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-sequential.html b/reference/furnace-model-sequential.html new file mode 100644 index 00000000..9824bc3f --- /dev/null +++ b/reference/furnace-model-sequential.html @@ -0,0 +1,258 @@ + + + + + Sequential (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Sequential Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Sequential(models) + + +

+
+
+
+ Full Usage: + Sequential(models) +
+
+ Parameters: +
    + + + models + + : + seq<Model> + +
    +
+
+ + Returns: + Sequential + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + models + + : + seq<Model> +
+
+
+
+
+ + Returns: + + Sequential +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-vae.html b/reference/furnace-model-vae.html new file mode 100644 index 00000000..8bc5a446 --- /dev/null +++ b/reference/furnace-model-vae.html @@ -0,0 +1,306 @@ + + + + + VAE (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ VAE Type +

+ +
+
+

+ Variational auto-encoder +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + VAE(xShape, zDim, encoder, decoder) + + +

+
+
+
+ Full Usage: + VAE(xShape, zDim, encoder, decoder) +
+
+ Parameters: +
    + + + xShape + + : + seq<int> + +
    + + + zDim + + : + int + +
    + + + encoder + + : + Model + +
    + + + decoder + + : + Model + +
    +
+
+ + Returns: + VAE + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + xShape + + : + seq<int> +
+
+
+ + zDim + + : + int +
+
+
+ + encoder + + : + Model +
+
+
+ + decoder + + : + Model +
+
+
+
+
+ + Returns: + + VAE +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-vaebase.html b/reference/furnace-model-vaebase.html new file mode 100644 index 00000000..bf6a1595 --- /dev/null +++ b/reference/furnace-model-vaebase.html @@ -0,0 +1,832 @@ + + + + + VAEBase (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ VAEBase Type +

+ +
+
+

+ Variational auto-encoder base +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + VAEBase(zDim) + + +

+
+
+
+ Full Usage: + VAEBase(zDim) +
+
+ Parameters: +
    + + + zDim + + : + int + +
    +
+
+ + Returns: + VAEBase + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + zDim + + : + int +
+
+
+
+
+ + Returns: + + VAEBase +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.decode arg1 + + +

+
+
+
+ Full Usage: + this.decode arg1 +
+
+ Parameters: +
    + + + arg0 + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + arg0 + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.encode arg1 + + +

+
+
+
+ Full Usage: + this.encode arg1 +
+
+ Parameters: +
    + + + arg0 + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + arg0 + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.encodeDecode x + + +

+
+
+
+ Full Usage: + this.encodeDecode x +
+
+ Parameters: +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + this.loss (x, ?normalize) + + +

+
+
+
+ Full Usage: + this.loss (x, ?normalize) +
+
+ Parameters: +
    + + + x + + : + Tensor + +
    + + + ?normalize + + : + bool + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + x + + : + Tensor +
+
+
+ + ?normalize + + : + bool +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sample ?numSamples + + +

+
+
+
+ Full Usage: + this.sample ?numSamples +
+
+ Parameters: +
    + + + ?numSamples + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?numSamples + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + VAEBase.loss (xRecon, x, mu, logVar) + + +

+
+
+
+ Full Usage: + VAEBase.loss (xRecon, x, mu, logVar) +
+
+ Parameters: +
    + + + xRecon + + : + Tensor + +
    + + + x + + : + Tensor + +
    + + + mu + + : + Tensor + +
    + + + logVar + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + xRecon + + : + Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + mu + + : + Tensor +
+
+
+ + logVar + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-vaemlp.html b/reference/furnace-model-vaemlp.html new file mode 100644 index 00000000..eb32403e --- /dev/null +++ b/reference/furnace-model-vaemlp.html @@ -0,0 +1,322 @@ + + + + + VAEMLP (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ VAEMLP Type +

+ +
+
+

+ Variational auto-encoder with multilayer perceptron (MLP) encoder and decoder. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + VAEMLP(xDim, zDim, ?hDims, ?nonlinearity, ?nonlinearityLast) + + +

+
+
+
+ Full Usage: + VAEMLP(xDim, zDim, ?hDims, ?nonlinearity, ?nonlinearityLast) +
+
+ Parameters: +
    + + + xDim + + : + int + +
    + + + zDim + + : + int + +
    + + + ?hDims + + : + seq<int> + +
    + + + ?nonlinearity + + : + Tensor -> Tensor + +
    + + + ?nonlinearityLast + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + VAEMLP + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + xDim + + : + int +
+
+
+ + zDim + + : + int +
+
+
+ + ?hDims + + : + seq<int> +
+
+
+ + ?nonlinearity + + : + Tensor -> Tensor +
+
+
+ + ?nonlinearityLast + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + VAEMLP +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model-weight.html b/reference/furnace-model-weight.html new file mode 100644 index 00000000..d3bf7e24 --- /dev/null +++ b/reference/furnace-model-weight.html @@ -0,0 +1,384 @@ + + + + + Weight (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Weight Type +

+ +
+
+

+ Contains functionality related to generating initial parameter weights for models. +

+
+
+
+
+
+
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + Weight.kaiming (fanIn, fanOut, ?a) + + +

+
+
+
+ Full Usage: + Weight.kaiming (fanIn, fanOut, ?a) +
+
+ Parameters: +
    + + + fanIn + + : + int + +
    + + + fanOut + + : + int + +
    + + + ?a + + : + float + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + fanIn + + : + int +
+
+
+ + fanOut + + : + int +
+
+
+ + ?a + + : + float +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Weight.uniform (shape, k) + + +

+
+
+
+ Full Usage: + Weight.uniform (shape, k) +
+
+ Parameters: +
    + + + shape + + : + seq<int> + +
    + + + k + + : + float + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + shape + + : + seq<int> +
+
+
+ + k + + : + float +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-model.html b/reference/furnace-model.html new file mode 100644 index 00000000..d5a96823 --- /dev/null +++ b/reference/furnace-model.html @@ -0,0 +1,1020 @@ + + + + + Furnace.Model + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Furnace.Model Namespace +

+
+

+ Contains types and functionality related to describing models. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type/Module + + Description +
+

+ + + BatchNorm1d + + +

+
+
+ + + + + + +

+ Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with optional additional channel dimension) +

+
+
+

+ + + BatchNorm2d + + +

+
+
+ + + + + + +

+ Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with optional additional channel dimension) +

+
+
+

+ + + BatchNorm3d + + +

+
+
+ + + + + + +

+ Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs with optional additional channel dimension) +

+
+
+

+ + + Conv1d + + +

+
+
+ + + + + + +

+ A model that applies a 1D convolution over an input signal composed of several input planes +

+
+
+

+ + + Conv2d + + +

+
+
+ + + + + + +

+ A model that applies a 2D convolution over an input signal composed of several input planes +

+
+
+

+ + + Conv3d + + +

+
+
+ + + + + + +

+ A model that applies a 3D convolution over an input signal composed of several input planes +

+
+
+

+ + + ConvTranspose1d + + +

+
+
+ + + + + + +

+ A model that applies a 1D transposed convolution operator over an input image composed of several input planes. +

+
+
+

+ + + ConvTranspose2d + + +

+
+
+ + + + + + +

+ A model that applies a 2D transposed convolution operator over an input image composed of several input planes. +

+
+
+

+ + + ConvTranspose3d + + +

+
+
+ + + + + + +

+ A model that applies a 3D transposed convolution operator over an input image composed of several input planes. +

+
+
+

+ + + Dropout + + +

+
+
+ + + + + + +

+ A model which during training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution. +

+
+
+

+ + + Dropout2d + + +

+
+
+ + + + + + +

+ A model which during training, randomly zero out entire channels. Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution. +

+
+
+

+ + + Dropout3d + + +

+
+
+ + + + + + +

+ A model which during training, randomly zero out entire channels. Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution. +

+
+
+

+ + + Linear + + +

+
+
+ + + + + + +

+ A model that applies a linear transformation to the incoming data: \(y = xA^T + b\) +

+
+
+

+ + + LSTM + + +

+
+
+ + + + + + +

+ Long short-term memory (LSTM) recurrent neural network. +

+
+
+

+ + + LSTMCell + + +

+
+
+ + + + + + +

+ Unit cell of a long short-term memory (LSTM) recurrent neural network. Prefer using the RNN class instead, which can combine RNNCells in multiple layers. +

+
+
+

+ + + Mode + + +

+
+
+ + + + + + +

+ Indicates the training or evaluation mode for a model. +

+
+
+

+ + + Model + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Model<'In, 'Out> + + +

+
+
+ + + + + + +

+ Represents a model, primarily a collection of named parameters and sub-models and a function governed by them. +

+
+
+

+ + + ModelBase + + +

+
+
+ + + + + + +

+ Represents the base class of all models. +

+
+
+

+ + + Parameter + + +

+
+
+ + + + + + +

+ Represents a parameter. +

+
+
+

+ + + ParameterDict + + +

+
+
+ + + + + + +

+ Represents a collection of named parameters. +

+
+
+

+ + + RecurrentShape + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + RNN + + +

+
+
+ + + + + + +

+ Recurrent neural network. +

+
+
+

+ + + RNNCell + + +

+
+
+ + + + + + +

+ Unit cell of a recurrent neural network. Prefer using the RNN class instead, which can combine RNNCells in multiple layers. +

+
+
+

+ + + Sequential + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + VAE + + +

+
+
+ + + + + + +

+ Variational auto-encoder +

+
+
+

+ + + VAEBase + + +

+
+
+ + + + + + +

+ Variational auto-encoder base +

+
+
+

+ + + VAEMLP + + +

+
+
+ + + + + + +

+ Variational auto-encoder with multilayer perceptron (MLP) encoder and decoder. +

+
+
+

+ + + Weight + + +

+
+
+ + + + + + +

+ Contains functionality related to generating initial parameter weights for models. +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-numerical-shorten.html b/reference/furnace-numerical-shorten.html new file mode 100644 index 00000000..84e04012 --- /dev/null +++ b/reference/furnace-numerical-shorten.html @@ -0,0 +1,1650 @@ + + + + + Shorten (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Shorten Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + FurnaceImage.numfg f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfg f x +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfgh f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfgh f x +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfgvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numfgvp f x v +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfh f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfh f x +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfhvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numfhvp f x v +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfj f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfj f x +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfjvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numfjvp f x v +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numg f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numg f x +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numgh f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numgh f x +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numgvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numgvp f x v +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numh f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numh f x +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numhvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numhvp f x v +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numj f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numj f x +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numjvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numjvp f x v +
+
+ Parameters: +
    + + + f + + : + float + +
    + + + x + + : + Tensor -> Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor -> Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + float +
+
+
+ + x + + : + Tensor -> Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-numerical.html b/reference/furnace-numerical.html new file mode 100644 index 00000000..beae9885 --- /dev/null +++ b/reference/furnace-numerical.html @@ -0,0 +1,3285 @@ + + + + + Numerical (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Numerical Module +

+ +
+
+

+ +

+
+
+
+

+ Nested modules +

+ + + + + + + + + + + + + +
+ Modules + + Description +
+

+ + + Shorten + + +

+
+
+ + + + + + +

+ +

+
+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + FurnaceImage.numcurl epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numcurl epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numcurldivergence epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numcurldivergence epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numdiff epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numdiff epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numdiff2 epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numdiff2 epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numdivergence epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numdivergence epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfcurl epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfcurl epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfcurldivergence epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfcurldivergence epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfdiff epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfdiff epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfdiff2 epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfdiff2 epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfdivergence epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfdivergence epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfgrad epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfgrad epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfgradhessian epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfgradhessian epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfgradv epsilon f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numfgradv epsilon f x v +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfhessian epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfhessian epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfhessianv epsilon f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numfhessianv epsilon f x v +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfjacobian epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numfjacobian epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numfjacobianv epsilon f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numfjacobianv epsilon f x v +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numflaplacian epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numflaplacian epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numgrad epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numgrad epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numgradhessian epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numgradhessian epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numgradv epsilon f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numgradv epsilon f x v +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numhessian epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numhessian epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numhessianv epsilon f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numhessianv epsilon f x v +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numjacobian epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numjacobian epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numjacobianv epsilon f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.numjacobianv epsilon f x v +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    + + + v + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.numlaplacian epsilon f x + + +

+
+
+
+ Full Usage: + FurnaceImage.numlaplacian epsilon f x +
+
+ Parameters: +
    + + + epsilon + + : + float + +
    + + + f + + : + Tensor -> Tensor + +
    + + + x + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + epsilon + + : + float +
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-opavgpoolextensions.html b/reference/furnace-opavgpoolextensions.html new file mode 100644 index 00000000..69b9ea39 --- /dev/null +++ b/reference/furnace-opavgpoolextensions.html @@ -0,0 +1,1298 @@ + + + + + OpAvgPoolExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ OpAvgPoolExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.avgpool1d (kernelSize, ?stride, ?padding) + + +

+
+
+
+ Full Usage: + this.avgpool1d (kernelSize, ?stride, ?padding) +
+
+ Parameters: +
    + + + kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D average pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+

+ Extended Type: + Tensor +

+
+
+ + kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.avgpool1d (input, kernelSize, ?stride, ?padding) + + +

+
+
+
+ Full Usage: + FurnaceImage.avgpool1d (input, kernelSize, ?stride, ?padding) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D average pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.avgpool2d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + this.avgpool2d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSize. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on both sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D average pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+

+ Extended Type: + Tensor +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSize. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.avgpool2d (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.avgpool2d (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSize. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on both sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D average pooling over an input signal composed of several input planes. +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take an average over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take an average over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSize. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.avgpool3d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + this.avgpool3d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + ?kernelSize + + : + int + - + The size of the window to take an average over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take an average over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSize. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on both sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D average pooling over an input signal composed of several input planes. +

+
+

+ Extended Type: + Tensor +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take an average over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take an average over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSize. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.avgpool3d (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + FurnaceImage.avgpool3d (input, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + input + + : + Tensor + - + The input tensor. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take an average over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take an average over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSize. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on both sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D average pooling over an input signal composed of several input planes. +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + input + + : + Tensor +
+
+

+ The input tensor. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take an average over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take an average over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSize. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-opbmmextensions.html b/reference/furnace-opbmmextensions.html new file mode 100644 index 00000000..3a67787b --- /dev/null +++ b/reference/furnace-opbmmextensions.html @@ -0,0 +1,374 @@ + + + + + OpBMMExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ OpBMMExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.bmm b + + +

+
+
+
+ Full Usage: + this.bmm b +
+
+ Parameters: +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Batched matrix product of two tensors. The tensor and b must be 3d tensors each containing the same number of matrices. If the tensor is a \(b \times n \times m\) tensor, and b is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor. +

+
+

+ Extended Type: + Tensor +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.bmm (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.bmm (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Batched matrix product of two tensors. Tensors a and b must be 3d tensors each containing the same number of matrices. If a is a \(b \times n \times m\) tensor, b is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor. +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-opdetextensions.html b/reference/furnace-opdetextensions.html new file mode 100644 index 00000000..b0531b55 --- /dev/null +++ b/reference/furnace-opdetextensions.html @@ -0,0 +1,340 @@ + + + + + OpDetExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ OpDetExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.det () + + +

+
+
+
+ Full Usage: + this.det () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.det a + + +

+
+
+
+ Full Usage: + FurnaceImage.det a +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-opinvextensions.html b/reference/furnace-opinvextensions.html new file mode 100644 index 00000000..184b0bba --- /dev/null +++ b/reference/furnace-opinvextensions.html @@ -0,0 +1,340 @@ + + + + + OpInvExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ OpInvExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.inv () + + +

+
+
+
+ Full Usage: + this.inv () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.inv a + + +

+
+
+
+ Full Usage: + FurnaceImage.inv a +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-opnormextensions.html b/reference/furnace-opnormextensions.html new file mode 100644 index 00000000..fd23d46d --- /dev/null +++ b/reference/furnace-opnormextensions.html @@ -0,0 +1,420 @@ + + + + + OpNormExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ OpNormExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.norm (?order, ?dim, ?keepDim) + + +

+
+
+
+ Full Usage: + this.norm (?order, ?dim, ?keepDim) +
+
+ Parameters: +
    + + + ?order + + : + float + +
    + + + ?dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + ?order + + : + float +
+
+
+ + ?dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.norm (a, ?order, ?dim, ?keepDim) + + +

+
+
+
+ Full Usage: + FurnaceImage.norm (a, ?order, ?dim, ?keepDim) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    + + + ?order + + : + float + +
    + + + ?dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + a + + : + Tensor +
+
+
+ + ?order + + : + float +
+
+
+ + ?dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-opouterextensions.html b/reference/furnace-opouterextensions.html new file mode 100644 index 00000000..54af3cbe --- /dev/null +++ b/reference/furnace-opouterextensions.html @@ -0,0 +1,374 @@ + + + + + OpOuterExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ OpOuterExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.outer b + + +

+
+
+
+ Full Usage: + this.outer b +
+
+ Parameters: +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Outer product of two tensors. +

+
+

+ Extended Type: + Tensor +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.outer (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.outer (a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The first tensor. + +
    + + + b + + : + Tensor + - + The second tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Outer product of two tensors. +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + a + + : + Tensor +
+
+

+ The first tensor. +

+
+
+ + b + + : + Tensor +
+
+

+ The second tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-opsolveextensions.html b/reference/furnace-opsolveextensions.html new file mode 100644 index 00000000..b2c916c1 --- /dev/null +++ b/reference/furnace-opsolveextensions.html @@ -0,0 +1,356 @@ + + + + + OpSolveExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ OpSolveExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.solve b + + +

+
+
+
+ Full Usage: + this.solve b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.solve (a, b) + + +

+
+
+
+ Full Usage: + FurnaceImage.solve (a, b) +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-optim-adam.html b/reference/furnace-optim-adam.html new file mode 100644 index 00000000..191bc76e --- /dev/null +++ b/reference/furnace-optim-adam.html @@ -0,0 +1,354 @@ + + + + + Adam (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Adam Type +

+ +
+
+

+ TBD +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Adam(model, ?lr, ?beta1, ?beta2, ?eps, ?weightDecay, ?reversible) + + +

+
+
+
+ Full Usage: + Adam(model, ?lr, ?beta1, ?beta2, ?eps, ?weightDecay, ?reversible) +
+
+ Parameters: +
    + + + model + + : + Model + +
    + + + ?lr + + : + Tensor + +
    + + + ?beta1 + + : + Tensor + +
    + + + ?beta2 + + : + Tensor + +
    + + + ?eps + + : + Tensor + +
    + + + ?weightDecay + + : + Tensor + +
    + + + ?reversible + + : + bool + +
    +
+
+ + Returns: + Adam + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + model + + : + Model +
+
+
+ + ?lr + + : + Tensor +
+
+
+ + ?beta1 + + : + Tensor +
+
+
+ + ?beta2 + + : + Tensor +
+
+
+ + ?eps + + : + Tensor +
+
+
+ + ?weightDecay + + : + Tensor +
+
+
+ + ?reversible + + : + bool +
+
+
+
+
+ + Returns: + + Adam +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-optim-optim.html b/reference/furnace-optim-optim.html new file mode 100644 index 00000000..00015766 --- /dev/null +++ b/reference/furnace-optim-optim.html @@ -0,0 +1,1264 @@ + + + + + optim (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ optim Type +

+ +
+
+

+ TBD +

+
+
+
+
+
+
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + optim.adam (model, dataloader, loss, ?lr, ?beta1, ?beta2, ?eps, ?weightDecay, ?reversible, ?iters, ?epochs, ?threshold, ?print, ?printEvery, ?printPrefix, ?printPostfix) + + +

+
+
+
+ Full Usage: + optim.adam (model, dataloader, loss, ?lr, ?beta1, ?beta2, ?eps, ?weightDecay, ?reversible, ?iters, ?epochs, ?threshold, ?print, ?printEvery, ?printPrefix, ?printPostfix) +
+
+ Parameters: +
    + + + model + + : + Model + +
    + + + dataloader + + : + DataLoader + +
    + + + loss + + : + Tensor -> Tensor -> Tensor + +
    + + + ?lr + + : + Tensor + +
    + + + ?beta1 + + : + Tensor + +
    + + + ?beta2 + + : + Tensor + +
    + + + ?eps + + : + Tensor + +
    + + + ?weightDecay + + : + Tensor + +
    + + + ?reversible + + : + bool + +
    + + + ?iters + + : + int + +
    + + + ?epochs + + : + int + +
    + + + ?threshold + + : + double + +
    + + + ?print + + : + bool + +
    + + + ?printEvery + + : + int + +
    + + + ?printPrefix + + : + string + +
    + + + ?printPostfix + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + model + + : + Model +
+
+
+ + dataloader + + : + DataLoader +
+
+
+ + loss + + : + Tensor -> Tensor -> Tensor +
+
+
+ + ?lr + + : + Tensor +
+
+
+ + ?beta1 + + : + Tensor +
+
+
+ + ?beta2 + + : + Tensor +
+
+
+ + ?eps + + : + Tensor +
+
+
+ + ?weightDecay + + : + Tensor +
+
+
+ + ?reversible + + : + bool +
+
+
+ + ?iters + + : + int +
+
+
+ + ?epochs + + : + int +
+
+
+ + ?threshold + + : + double +
+
+
+ + ?print + + : + bool +
+
+
+ + ?printEvery + + : + int +
+
+
+ + ?printPrefix + + : + string +
+
+
+ + ?printPostfix + + : + string +
+
+
+
+
+ +

+ + + optim.adam (f, x0, ?lr, ?beta1, ?beta2, ?eps, ?iters, ?threshold, ?print, ?printEvery, ?printPrefix, ?printPostfix) + + +

+
+
+
+ Full Usage: + optim.adam (f, x0, ?lr, ?beta1, ?beta2, ?eps, ?iters, ?threshold, ?print, ?printEvery, ?printPrefix, ?printPostfix) +
+
+ Parameters: +
    + + + f + + : + Tensor -> Tensor + +
    + + + x0 + + : + Tensor + +
    + + + ?lr + + : + Tensor + +
    + + + ?beta1 + + : + Tensor + +
    + + + ?beta2 + + : + Tensor + +
    + + + ?eps + + : + Tensor + +
    + + + ?iters + + : + int + +
    + + + ?threshold + + : + double + +
    + + + ?print + + : + bool + +
    + + + ?printEvery + + : + int + +
    + + + ?printPrefix + + : + string + +
    + + + ?printPostfix + + : + string + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x0 + + : + Tensor +
+
+
+ + ?lr + + : + Tensor +
+
+
+ + ?beta1 + + : + Tensor +
+
+
+ + ?beta2 + + : + Tensor +
+
+
+ + ?eps + + : + Tensor +
+
+
+ + ?iters + + : + int +
+
+
+ + ?threshold + + : + double +
+
+
+ + ?print + + : + bool +
+
+
+ + ?printEvery + + : + int +
+
+
+ + ?printPrefix + + : + string +
+
+
+ + ?printPostfix + + : + string +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + optim.sgd (model, dataloader, loss, ?lr, ?momentum, ?nesterov, ?weightDecay, ?reversible, ?iters, ?epochs, ?threshold, ?print, ?printEvery, ?printPrefix, ?printPostfix) + + +

+
+
+
+ Full Usage: + optim.sgd (model, dataloader, loss, ?lr, ?momentum, ?nesterov, ?weightDecay, ?reversible, ?iters, ?epochs, ?threshold, ?print, ?printEvery, ?printPrefix, ?printPostfix) +
+
+ Parameters: +
    + + + model + + : + Model + +
    + + + dataloader + + : + DataLoader + +
    + + + loss + + : + Tensor -> Tensor -> Tensor + +
    + + + ?lr + + : + Tensor + +
    + + + ?momentum + + : + Tensor + +
    + + + ?nesterov + + : + bool + +
    + + + ?weightDecay + + : + Tensor + +
    + + + ?reversible + + : + bool + +
    + + + ?iters + + : + int + +
    + + + ?epochs + + : + int + +
    + + + ?threshold + + : + double + +
    + + + ?print + + : + bool + +
    + + + ?printEvery + + : + int + +
    + + + ?printPrefix + + : + string + +
    + + + ?printPostfix + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + model + + : + Model +
+
+
+ + dataloader + + : + DataLoader +
+
+
+ + loss + + : + Tensor -> Tensor -> Tensor +
+
+
+ + ?lr + + : + Tensor +
+
+
+ + ?momentum + + : + Tensor +
+
+
+ + ?nesterov + + : + bool +
+
+
+ + ?weightDecay + + : + Tensor +
+
+
+ + ?reversible + + : + bool +
+
+
+ + ?iters + + : + int +
+
+
+ + ?epochs + + : + int +
+
+
+ + ?threshold + + : + double +
+
+
+ + ?print + + : + bool +
+
+
+ + ?printEvery + + : + int +
+
+
+ + ?printPrefix + + : + string +
+
+
+ + ?printPostfix + + : + string +
+
+
+
+
+ +

+ + + optim.sgd (f, x0, ?lr, ?momentum, ?nesterov, ?iters, ?threshold, ?print, ?printEvery, ?printPrefix, ?printPostfix) + + +

+
+
+
+ Full Usage: + optim.sgd (f, x0, ?lr, ?momentum, ?nesterov, ?iters, ?threshold, ?print, ?printEvery, ?printPrefix, ?printPostfix) +
+
+ Parameters: +
    + + + f + + : + Tensor -> Tensor + +
    + + + x0 + + : + Tensor + +
    + + + ?lr + + : + Tensor + +
    + + + ?momentum + + : + Tensor + +
    + + + ?nesterov + + : + bool + +
    + + + ?iters + + : + int + +
    + + + ?threshold + + : + double + +
    + + + ?print + + : + bool + +
    + + + ?printEvery + + : + int + +
    + + + ?printPrefix + + : + string + +
    + + + ?printPostfix + + : + string + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x0 + + : + Tensor +
+
+
+ + ?lr + + : + Tensor +
+
+
+ + ?momentum + + : + Tensor +
+
+
+ + ?nesterov + + : + bool +
+
+
+ + ?iters + + : + int +
+
+
+ + ?threshold + + : + double +
+
+
+ + ?print + + : + bool +
+
+
+ + ?printEvery + + : + int +
+
+
+ + ?printPrefix + + : + string +
+
+
+ + ?printPostfix + + : + string +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-optim-optimizer.html b/reference/furnace-optim-optimizer.html new file mode 100644 index 00000000..af25813c --- /dev/null +++ b/reference/furnace-optim-optimizer.html @@ -0,0 +1,513 @@ + + + + + Optimizer (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Optimizer Type +

+ +
+
+

+ Represents an optimizer. +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Optimizer(model) + + +

+
+
+
+ Full Usage: + Optimizer(model) +
+
+ Parameters: +
    + + + model + + : + Model + +
    +
+
+ + Returns: + Optimizer + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + model + + : + Model +
+
+
+
+
+ + Returns: + + Optimizer +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.model + + +

+
+
+
+ Full Usage: + this.model +
+
+ + Returns: + Model + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + Returns: + + Model +
+
+
+
+
+ +

+ + + this.stateStep + + +

+
+
+
+ Full Usage: + this.stateStep +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.step () + + +

+
+
+
+ Full Usage: + this.step () +
+
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ +

+ + + this.updateRule arg1 arg2 + + +

+
+
+
+ Full Usage: + this.updateRule arg1 arg2 +
+
+ Parameters: +
    + + + arg0 + + : + string + +
    + + + arg1 + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+
+
+ + arg0 + + : + string +
+
+
+ + arg1 + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-optim-sgd.html b/reference/furnace-optim-sgd.html new file mode 100644 index 00000000..674109df --- /dev/null +++ b/reference/furnace-optim-sgd.html @@ -0,0 +1,338 @@ + + + + + SGD (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ SGD Type +

+ +
+
+

+ TBD +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + SGD(model, ?lr, ?momentum, ?nesterov, ?weightDecay, ?reversible) + + +

+
+
+
+ Full Usage: + SGD(model, ?lr, ?momentum, ?nesterov, ?weightDecay, ?reversible) +
+
+ Parameters: +
    + + + model + + : + Model + +
    + + + ?lr + + : + Tensor + +
    + + + ?momentum + + : + Tensor + +
    + + + ?nesterov + + : + bool + +
    + + + ?weightDecay + + : + Tensor + +
    + + + ?reversible + + : + bool + +
    +
+
+ + Returns: + SGD + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + model + + : + Model +
+
+
+ + ?lr + + : + Tensor +
+
+
+ + ?momentum + + : + Tensor +
+
+
+ + ?nesterov + + : + bool +
+
+
+ + ?weightDecay + + : + Tensor +
+
+
+ + ?reversible + + : + bool +
+
+
+
+
+ + Returns: + + SGD +
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-optim.html b/reference/furnace-optim.html new file mode 100644 index 00000000..486f6003 --- /dev/null +++ b/reference/furnace-optim.html @@ -0,0 +1,270 @@ + + + + + Furnace.Optim + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Furnace.Optim Namespace +

+
+

+ Contains types and functionality related to optimizing models and functions. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type + + Description +
+

+ + + Adam + + +

+
+
+ + + + + + +

+ TBD +

+
+
+

+ + + optim + + +

+
+
+ + + + + + +

+ TBD +

+
+
+

+ + + Optimizer + + +

+
+
+ + + + + + +

+ Represents an optimizer. +

+
+
+

+ + + SGD + + +

+
+
+ + + + + + +

+ TBD +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-printer.html b/reference/furnace-printer.html new file mode 100644 index 00000000..1293a811 --- /dev/null +++ b/reference/furnace-printer.html @@ -0,0 +1,568 @@ + + + + + Printer (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Printer Type +

+ +
+
+

+ +

+
+
+
+
+
+
+

+ Union cases +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Union case + + Description +
+
+ +

+ + + Custom(threshold, edgeItems, precision) + + +

+
+
+
+ Full Usage: + Custom(threshold, edgeItems, precision) +
+
+ Parameters: +
    + + + threshold + + : + int + +
    + + + edgeItems + + : + int + +
    + + + precision + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + threshold + + : + int +
+
+
+ + edgeItems + + : + int +
+
+
+ + precision + + : + int +
+
+
+
+
+ +

+ + + Default + + +

+
+
+
+ Full Usage: + Default +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ +

+ + + Full + + +

+
+
+
+ Full Usage: + Full +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ +

+ + + Short + + +

+
+
+
+ Full Usage: + Short +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.edgeItems + + +

+
+
+
+ Full Usage: + this.edgeItems +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.precision + + +

+
+
+
+ Full Usage: + this.precision +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.threshold + + +

+
+
+
+ Full Usage: + this.threshold +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + int +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-printermodule.html b/reference/furnace-printermodule.html new file mode 100644 index 00000000..a1d820ea --- /dev/null +++ b/reference/furnace-printermodule.html @@ -0,0 +1,236 @@ + + + + + Printer (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Printer Module +

+ +
+
+

+ + Contains functions and settings related to print options. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + Default + + +

+
+
+
+ Full Usage: + Default +
+
+ + Returns: + Printer + +
+
+
+
+
+
+ + + + + + +

+ + Get or set the default printer used when printing tensors. Note, use FurnaceImage.config(...) instead. + +

+
+
+
+ + Returns: + + Printer +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-scalar.html b/reference/furnace-scalar.html new file mode 100644 index 00000000..e9fee433 --- /dev/null +++ b/reference/furnace-scalar.html @@ -0,0 +1,159 @@ + + + + + scalar (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ scalar Type +

+ +
+
+

+ + Represents a scalar on the Furnace programming model + +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-scalarextensions.html b/reference/furnace-scalarextensions.html new file mode 100644 index 00000000..e0342793 --- /dev/null +++ b/reference/furnace-scalarextensions.html @@ -0,0 +1,1550 @@ + + + + + ScalarExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ScalarExtensions Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + tryWidenScalar tensorDtype scalar + + +

+
+
+
+ Full Usage: + tryWidenScalar tensorDtype scalar +
+
+ Parameters: +
    + + + tensorDtype + + : + Dtype + +
    + + + scalar + + : + scalar + +
    +
+
+ + Returns: + Dtype voption + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + tensorDtype + + : + Dtype +
+
+
+ + scalar + + : + scalar +
+
+
+
+
+ + Returns: + + Dtype voption +
+
+
+
+
+ +

+ + + widenScalarForDivision tensorDtype scalarDtype + + +

+
+
+
+ Full Usage: + widenScalarForDivision tensorDtype scalarDtype +
+
+ Parameters: +
    + + + tensorDtype + + : + Dtype + +
    + + + scalarDtype + + : + Dtype + +
    +
+
+ + Returns: + Dtype + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + tensorDtype + + : + Dtype +
+
+
+ + scalarDtype + + : + Dtype +
+
+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.cast dtype + + +

+
+
+
+ Full Usage: + this.cast dtype +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    +
+
+ + Returns: + scalar + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + dtype + + : + Dtype +
+
+
+
+
+ + Returns: + + scalar +
+
+
+
+
+ +

+ + + this.dtype () + + +

+
+
+
+ Full Usage: + this.dtype () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + Dtype + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+ +

+ + + this.dtype + + +

+
+
+
+ Full Usage: + this.dtype +
+
+ + Returns: + Dtype + +
+ Modifiers: + inline abstract +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + Returns: + + Dtype +
+
+
+
+
+ +

+ + + this.log () + + +

+
+
+
+ Full Usage: + this.log () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + scalar + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + scalar +
+
+
+
+
+ +

+ + + this.neg () + + +

+
+
+
+ Full Usage: + this.neg () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + scalar + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + scalar +
+
+
+
+
+ +

+ + + this.sub y + + +

+
+
+
+ Full Usage: + this.sub y +
+
+ Parameters: +
    + + + y + + : + scalar + +
    +
+
+ + Returns: + scalar + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + y + + : + scalar +
+
+
+
+
+ + Returns: + + scalar +
+
+
+
+
+ +

+ + + this.toBool () + + +

+
+
+
+ Full Usage: + this.toBool () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + bool + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.toByte () + + +

+
+
+
+ Full Usage: + this.toByte () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + byte + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + byte +
+
+
+
+
+ +

+ + + this.toDouble () + + +

+
+
+
+ Full Usage: + this.toDouble () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + float + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + this.toInt16 () + + +

+
+
+
+ Full Usage: + this.toInt16 () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + int16 + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + int16 +
+
+
+
+
+ +

+ + + this.toInt32 () + + +

+
+
+
+ Full Usage: + this.toInt32 () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + int + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.toInt64 () + + +

+
+
+
+ Full Usage: + this.toInt64 () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + int64 + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + int64 +
+
+
+
+
+ +

+ + + this.toSByte () + + +

+
+
+
+ Full Usage: + this.toSByte () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + sbyte + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + sbyte +
+
+
+
+
+ +

+ + + this.toSingle () + + +

+
+
+
+ Full Usage: + this.toSingle () +
+
+ Parameters: +
    + + + () + + : + unit + +
    +
+
+ + Returns: + float32 + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + IConvertible +

+
+
+ + () + + : + unit +
+
+
+
+
+ + Returns: + + float32 +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-shape.html b/reference/furnace-shape.html new file mode 100644 index 00000000..ead3512d --- /dev/null +++ b/reference/furnace-shape.html @@ -0,0 +1,179 @@ + + + + + Shape (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Shape Type +

+ +
+
+

+ + Represents the shape of a tensor. + +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-shapeautoopens.html b/reference/furnace-shapeautoopens.html new file mode 100644 index 00000000..b876790c --- /dev/null +++ b/reference/furnace-shapeautoopens.html @@ -0,0 +1,974 @@ + + + + + ShapeAutoOpens (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ShapeAutoOpens Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + boundsIsScalar bounds + + +

+
+
+
+ Full Usage: + boundsIsScalar bounds +
+
+ Parameters: +
    + + + bounds + + : + int[,] + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the full bounds is a scalar location + +

+
+
+
+ + bounds + + : + int[,] +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + boundsToLocation bounds + + +

+
+
+
+ Full Usage: + boundsToLocation bounds +
+
+ Parameters: +
    + + + bounds + + : + int[,] + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Converts the array of three-position bounds specifications to a location. + +

+
+
+
+ + bounds + + : + int[,] +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + boundsToShape bounds + + +

+
+
+
+ Full Usage: + boundsToShape bounds +
+
+ Parameters: +
    + + + bounds + + : + int[,] + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Converts the array of three-position bounds specifications to a shape without squeezing out scalars + +

+
+
+
+ + bounds + + : + int[,] +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + dilatedCoordinates coordinates dilations + + +

+
+
+
+ Full Usage: + dilatedCoordinates coordinates dilations +
+
+ Parameters: +
    + + + coordinates + + : + int[] + +
    + + + dilations + + : + int[] + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Dilates the given coordinates. + +

+
+
+
+ + coordinates + + : + int[] +
+
+
+ + dilations + + : + int[] +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + flatIndexToIndex shape flatIndex + + +

+
+
+
+ Full Usage: + flatIndexToIndex shape flatIndex +
+
+ Parameters: +
    + + + shape + + : + int[] + +
    + + + flatIndex + + : + int + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Converts the given flat index to an index in the context of the given shape. + +

+
+
+
+ + shape + + : + int[] +
+
+
+ + flatIndex + + : + int +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + indexToFlatIndex shape index + + +

+
+
+
+ Full Usage: + indexToFlatIndex shape index +
+
+ Parameters: +
    + + + shape + + : + int[] + +
    + + + index + + : + int[] + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Converts the given index to a flat index in the context of the given shape. + +

+
+
+
+ + shape + + : + int[] +
+
+
+ + index + + : + int[] +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + mirrorCoordinates coordinates shape mirrorDims + + +

+
+
+
+ Full Usage: + mirrorCoordinates coordinates shape mirrorDims +
+
+ Parameters: +
    + + + coordinates + + : + int[] + +
    + + + shape + + : + int[] + +
    + + + mirrorDims + + : + int[] + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Mirrors the coordinates in the given dimensions in the context of the given shape. + +

+
+
+
+ + coordinates + + : + int[] +
+
+
+ + shape + + : + int[] +
+
+
+ + mirrorDims + + : + int[] +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + shapeLength shape + + +

+
+
+
+ Full Usage: + shapeLength shape +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Gets the total number of elements in a shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + shapeToFullBounds shape + + +

+
+
+
+ Full Usage: + shapeToFullBounds shape +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + int[,] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + int[,] +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-shapemodule.html b/reference/furnace-shapemodule.html new file mode 100644 index 00000000..d695d759 --- /dev/null +++ b/reference/furnace-shapemodule.html @@ -0,0 +1,8867 @@ + + + + + Shape (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Shape Module +

+ +
+
+

+ + Contains functions and values related to tensor shapes. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + broadcast2 shape1 shape2 + + +

+
+
+
+ Full Usage: + broadcast2 shape1 shape2 +
+
+ Parameters: +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Finds the shape into which `shape1` and `shape2` can be expanded. + +

+
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + broadcastShapes shapes + + +

+
+
+
+ Full Usage: + broadcastShapes shapes +
+
+ Parameters: +
    + + + shapes + + : + Shape[] + +
    +
+
+ + Returns: + Shape + +
+
+
+
+
+
+ + + + + + +

+ + Finds the shape into which all the shapes can be expanded. + +

+
+
+
+ + shapes + + : + Shape[] +
+
+
+
+
+ + Returns: + + Shape +
+
+
+
+
+ +

+ + + canExpand oldShape newShape + + +

+
+
+
+ Full Usage: + canExpand oldShape newShape +
+
+ Parameters: +
    + + + oldShape + + : + Shape + +
    + + + newShape + + : + Shape + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if one shape can expand into another through the addition of broadcast dimensions. + +

+
+
+
+ + oldShape + + : + Shape +
+
+
+ + newShape + + : + Shape +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + checkCanAddSlice shape1 location shape2 + + +

+
+
+
+ Full Usage: + checkCanAddSlice shape1 location shape2 +
+
+ Parameters: +
    + + + shape1 + + : + Shape + +
    + + + location + + : + int[] + +
    + + + shape2 + + : + Shape + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for an addSlice operation. + +

+
+
+
+ + shape1 + + : + Shape +
+
+
+ + location + + : + int[] +
+
+
+ + shape2 + + : + Shape +
+
+
+
+
+ +

+ + + checkCanAvgpool1d dtype shape kernelSize stride padding + + +

+
+
+
+ Full Usage: + checkCanAvgpool1d dtype shape kernelSize stride padding +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    +
+
+ + Returns: + int * int * int * int * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+
+
+ + Returns: + + int * int * int * int * int[] +
+
+
+
+
+ +

+ + + checkCanAvgpool2d dtype shape kernelSize strides paddings + + +

+
+
+
+ Full Usage: + checkCanAvgpool2d dtype shape kernelSize strides paddings +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int[] + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int) * (int * int) * (int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int[] +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int) * (int * int) * (int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanAvgpool3d dtype shape kernelSize strides paddings + + +

+
+
+
+ Full Usage: + checkCanAvgpool3d dtype shape kernelSize strides paddings +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int[] + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int * int) * (int * int * int) * (int * int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for an avgpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int[] +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int * int) * (int * int * int) * (int * int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanBMM shape1 shape2 + + +

+
+
+
+ Full Usage: + checkCanBMM shape1 shape2 +
+
+ Parameters: +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a batched matrix multiplication operation. + +

+
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + checkCanCat shapes dim + + +

+
+
+
+ Full Usage: + checkCanCat shapes dim +
+
+ Parameters: +
    + + + shapes + + : + Shape[] + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + int * int[] * int * int[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a concatenation operation and returns information related to the resulting shape. + +

+
+
+
+ + shapes + + : + Shape[] +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + int * int[] * int * int[] * int[] +
+
+
+
+
+ +

+ + + checkCanConv1d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 stride padding dilation + + +

+
+
+
+ Full Usage: + checkCanConv1d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 stride padding dilation +
+
+ Parameters: +
    + + + deviceType1 + + : + DeviceType + +
    + + + deviceType2 + + : + DeviceType + +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    + + + dilation + + : + int + +
    +
+
+ + Returns: + int * int * int * int * int * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape. + +

+
+
+
+ + deviceType1 + + : + DeviceType +
+
+
+ + deviceType2 + + : + DeviceType +
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+ + dilation + + : + int +
+
+
+
+
+ + Returns: + + int * int * int * int * int * int[] +
+
+
+
+
+ +

+ + + checkCanConv2d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 strides paddings dilations + + +

+
+
+
+ Full Usage: + checkCanConv2d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 strides paddings dilations +
+
+ Parameters: +
    + + + deviceType1 + + : + DeviceType + +
    + + + deviceType2 + + : + DeviceType + +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    + + + dilations + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int) * (int * int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape. + +

+
+
+
+ + deviceType1 + + : + DeviceType +
+
+
+ + deviceType2 + + : + DeviceType +
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+ + dilations + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int) * (int * int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanConv3d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 strides paddings dilations + + +

+
+
+
+ Full Usage: + checkCanConv3d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 strides paddings dilations +
+
+ Parameters: +
    + + + deviceType1 + + : + DeviceType + +
    + + + deviceType2 + + : + DeviceType + +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    + + + dilations + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int * int) * (int * int * int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a convolution operation and returns information related to the resulting shape. + +

+
+
+
+ + deviceType1 + + : + DeviceType +
+
+
+ + deviceType2 + + : + DeviceType +
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+ + dilations + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int * int) * (int * int * int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanConvTranspose1d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 stride padding dilation outputPadding + + +

+
+
+
+ Full Usage: + checkCanConvTranspose1d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 stride padding dilation outputPadding +
+
+ Parameters: +
    + + + deviceType1 + + : + DeviceType + +
    + + + deviceType2 + + : + DeviceType + +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    + + + dilation + + : + int + +
    + + + outputPadding + + : + int + +
    +
+
+ + Returns: + int * int * int * int * int * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting shape. + +

+
+
+
+ + deviceType1 + + : + DeviceType +
+
+
+ + deviceType2 + + : + DeviceType +
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+ + dilation + + : + int +
+
+
+ + outputPadding + + : + int +
+
+
+
+
+ + Returns: + + int * int * int * int * int * int[] +
+
+
+
+
+ +

+ + + checkCanConvTranspose2d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 strides paddings dilations outputPaddings + + +

+
+
+
+ Full Usage: + checkCanConvTranspose2d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 strides paddings dilations outputPaddings +
+
+ Parameters: +
    + + + deviceType1 + + : + DeviceType + +
    + + + deviceType2 + + : + DeviceType + +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    + + + dilations + + : + int[] + +
    + + + outputPaddings + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int) * (int * int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting shape. + +

+
+
+
+ + deviceType1 + + : + DeviceType +
+
+
+ + deviceType2 + + : + DeviceType +
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+ + dilations + + : + int[] +
+
+
+ + outputPaddings + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int) * (int * int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanConvTranspose3d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 strides paddings dilations outputPaddings + + +

+
+
+
+ Full Usage: + checkCanConvTranspose3d deviceType1 deviceType2 dtype1 dtype2 shape1 shape2 strides paddings dilations outputPaddings +
+
+ Parameters: +
    + + + deviceType1 + + : + DeviceType + +
    + + + deviceType2 + + : + DeviceType + +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    + + + dilations + + : + int[] + +
    + + + outputPaddings + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int * int) * (int * int * int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a transposed convolution operation and returns information related to the resulting shape. + +

+
+
+
+ + deviceType1 + + : + DeviceType +
+
+
+ + deviceType2 + + : + DeviceType +
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+ + dilations + + : + int[] +
+
+
+ + outputPaddings + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int * int) * (int * int * int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanDet shape + + +

+
+
+
+ Full Usage: + checkCanDet shape +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a determinant operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+
+
+ +

+ + + checkCanDilate dim dilations + + +

+
+
+
+ Full Usage: + checkCanDilate dim dilations +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + dilations + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a dilate operation. + +

+
+
+
+ + dim + + : + int +
+
+
+ + dilations + + : + int[] +
+
+
+
+
+ +

+ + + checkCanDot shape1 shape2 + + +

+
+
+
+ Full Usage: + checkCanDot shape1 shape2 +
+
+ Parameters: +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a dot product operation. + +

+
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+
+
+ +

+ + + checkCanDropout p + + +

+
+
+
+ Full Usage: + checkCanDropout p +
+
+ Parameters: +
    + + + p + + : + double + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a dropout operation. + +

+
+
+
+ + p + + : + double +
+
+
+
+
+ +

+ + + checkCanDropout2d shape p + + +

+
+
+
+ Full Usage: + checkCanDropout2d shape p +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + p + + : + double + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a dropout2d operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + p + + : + double +
+
+
+
+
+ +

+ + + checkCanDropout3d shape p + + +

+
+
+
+ Full Usage: + checkCanDropout3d shape p +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + p + + : + double + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a dropout3d operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + p + + : + double +
+
+
+
+
+ +

+ + + checkCanExpand oldShape newShape + + +

+
+
+
+ Full Usage: + checkCanExpand oldShape newShape +
+
+ Parameters: +
    + + + oldShape + + : + Shape + +
    + + + newShape + + : + Shape + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if one shape can expand into another through the addition of broadcast dimensions. + +

+
+
+
+ + oldShape + + : + Shape +
+
+
+ + newShape + + : + Shape +
+
+
+
+
+ +

+ + + checkCanFlatten shape startDim endDim + + +

+
+
+
+ Full Usage: + checkCanFlatten shape startDim endDim +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + startDim + + : + int + +
    + + + endDim + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a flatten operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + startDim + + : + int +
+
+
+ + endDim + + : + int +
+
+
+
+
+ +

+ + + checkCanFlip dim dims + + +

+
+
+
+ Full Usage: + checkCanFlip dim dims +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + dims + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a flip operation. + +

+
+
+
+ + dim + + : + int +
+
+
+ + dims + + : + int[] +
+
+
+
+
+ +

+ + + checkCanGather shape dim indicesShape indicesDtype + + +

+
+
+
+ Full Usage: + checkCanGather shape dim indicesShape indicesDtype +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dim + + : + int + +
    + + + indicesShape + + : + Shape + +
    + + + indicesDtype + + : + Dtype + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a gather operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dim + + : + int +
+
+
+ + indicesShape + + : + Shape +
+
+
+ + indicesDtype + + : + Dtype +
+
+
+
+
+ +

+ + + checkCanGetSlice shape fullBounds + + +

+
+
+
+ Full Usage: + checkCanGetSlice shape fullBounds +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + fullBounds + + : + int[,] + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a GetSlice operation and returns information related to the resulting shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + fullBounds + + : + int[,] +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + checkCanIndex shape index + + +

+
+
+
+ Full Usage: + checkCanIndex shape index +
+
+ Parameters: +
    + + + shape + + : + int[] + +
    + + + index + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given index is valid in the context of the given shape. + +

+
+
+
+ + shape + + : + int[] +
+
+
+ + index + + : + int[] +
+
+
+
+
+ +

+ + + checkCanInvert shape + + +

+
+
+
+ Full Usage: + checkCanInvert shape +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a transpose operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+
+
+ +

+ + + checkCanMatmul shape1 shape2 + + +

+
+
+
+ Full Usage: + checkCanMatmul shape1 shape2 +
+
+ Parameters: +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    +
+
+ + Returns: + (int[] * int[]) * (int[] * int[]) + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a matmul operation. + +

+
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+
+
+ + Returns: + + (int[] * int[]) * (int[] * int[]) +
+
+
+
+
+ +

+ + + checkCanMaxOrAvgpool1d nm dtype shape kernelSize stride padding + + +

+
+
+
+ Full Usage: + checkCanMaxOrAvgpool1d nm dtype shape kernelSize stride padding +
+
+ Parameters: +
    + + + nm + + : + string + +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    +
+
+ + Returns: + int * int * int * int * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape. + +

+
+
+
+ + nm + + : + string +
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+
+
+ + Returns: + + int * int * int * int * int[] +
+
+
+
+
+ +

+ + + checkCanMaxOrAvgpool2d nm dtype shape kernelSize strides paddings + + +

+
+
+
+ Full Usage: + checkCanMaxOrAvgpool2d nm dtype shape kernelSize strides paddings +
+
+ Parameters: +
    + + + nm + + : + string + +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int[] + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int) * (int * int) * (int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape. + +

+
+
+
+ + nm + + : + string +
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int[] +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int) * (int * int) * (int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanMaxOrAvgpool3d nm dtype shape kernelSize strides paddings + + +

+
+
+
+ Full Usage: + checkCanMaxOrAvgpool3d nm dtype shape kernelSize strides paddings +
+
+ Parameters: +
    + + + nm + + : + string + +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int[] + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int * int) * (int * int * int) * (int * int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape. + +

+
+
+
+ + nm + + : + string +
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int[] +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int * int) * (int * int * int) * (int * int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanMaxpool1d dtype shape kernelSize stride padding + + +

+
+
+
+ Full Usage: + checkCanMaxpool1d dtype shape kernelSize stride padding +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int + +
    + + + stride + + : + int + +
    + + + padding + + : + int + +
    +
+
+ + Returns: + int * int * int * int * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int +
+
+
+ + stride + + : + int +
+
+
+ + padding + + : + int +
+
+
+
+
+ + Returns: + + int * int * int * int * int[] +
+
+
+
+
+ +

+ + + checkCanMaxpool2d dtype shape kernelSize strides paddings + + +

+
+
+
+ Full Usage: + checkCanMaxpool2d dtype shape kernelSize strides paddings +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int[] + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int) * (int * int) * (int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int[] +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int) * (int * int) * (int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanMaxpool3d dtype shape kernelSize strides paddings + + +

+
+
+
+ Full Usage: + checkCanMaxpool3d dtype shape kernelSize strides paddings +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + kernelSize + + : + int[] + +
    + + + strides + + : + int[] + +
    + + + paddings + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int * int) * (int * int * int) * (int * int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + kernelSize + + : + int[] +
+
+
+ + strides + + : + int[] +
+
+
+ + paddings + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int * int) * (int * int * int) * (int * int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanMaxunpool1d dtype shape indicesDtype indicesShape outputSize + + +

+
+
+
+ Full Usage: + checkCanMaxunpool1d dtype shape indicesDtype indicesShape outputSize +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + indicesDtype + + : + Dtype + +
    + + + indicesShape + + : + Shape + +
    + + + outputSize + + : + int[] + +
    +
+
+ + Returns: + int * int * int * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + indicesDtype + + : + Dtype +
+
+
+ + indicesShape + + : + Shape +
+
+
+ + outputSize + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * int * int[] +
+
+
+
+
+ +

+ + + checkCanMaxunpool2d dtype shape indicesDtype indicesShape outputSize + + +

+
+
+
+ Full Usage: + checkCanMaxunpool2d dtype shape indicesDtype indicesShape outputSize +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + indicesDtype + + : + Dtype + +
    + + + indicesShape + + : + Shape + +
    + + + outputSize + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + indicesDtype + + : + Dtype +
+
+
+ + indicesShape + + : + Shape +
+
+
+ + outputSize + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanMaxunpool3d dtype shape indicesDtype indicesShape outputSize + + +

+
+
+
+ Full Usage: + checkCanMaxunpool3d dtype shape indicesDtype indicesShape outputSize +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    + + + shape + + : + Shape + +
    + + + indicesDtype + + : + Dtype + +
    + + + indicesShape + + : + Shape + +
    + + + outputSize + + : + int[] + +
    +
+
+ + Returns: + int * int * (int * int * int) * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a maxunpool operation and returns information related to the resulting shape. + +

+
+
+
+ + dtype + + : + Dtype +
+
+
+ + shape + + : + Shape +
+
+
+ + indicesDtype + + : + Dtype +
+
+
+ + indicesShape + + : + Shape +
+
+
+ + outputSize + + : + int[] +
+
+
+
+
+ + Returns: + + int * int * (int * int * int) * int[] +
+
+
+
+
+ +

+ + + checkCanMinMaxReduce dim keepDim shape + + +

+
+
+
+ Full Usage: + checkCanMinMaxReduce dim keepDim shape +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + keepDim + + : + bool + +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + dim + + : + int +
+
+
+ + keepDim + + : + bool +
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + checkCanPad shape paddings + + +

+
+
+
+ Full Usage: + checkCanPad shape paddings +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + paddings + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a pad operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + paddings + + : + int[] +
+
+
+
+
+ +

+ + + checkCanPermute shape permutation + + +

+
+
+
+ Full Usage: + checkCanPermute shape permutation +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + permutation + + : + int[] + +
    +
+
+ + Returns: + int[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a permute operation and returns information related to the resulting shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + permutation + + : + int[] +
+
+
+
+
+ + Returns: + + int[] * int[] +
+
+
+
+
+ +

+ + + checkCanRepeat shape dim + + +

+
+
+
+ Full Usage: + checkCanRepeat shape dim +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dim + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a repeat operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dim + + : + int +
+
+
+
+
+ +

+ + + checkCanScatter shape dim indicesShape indicesDtype destinationShape + + +

+
+
+
+ Full Usage: + checkCanScatter shape dim indicesShape indicesDtype destinationShape +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dim + + : + int + +
    + + + indicesShape + + : + Shape + +
    + + + indicesDtype + + : + Dtype + +
    + + + destinationShape + + : + Shape + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a scatter operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dim + + : + int +
+
+
+ + indicesShape + + : + Shape +
+
+
+ + indicesDtype + + : + Dtype +
+
+
+ + destinationShape + + : + Shape +
+
+
+
+
+ +

+ + + checkCanSolve shapeA shapeB + + +

+
+
+
+ Full Usage: + checkCanSolve shapeA shapeB +
+
+ Parameters: +
    + + + shapeA + + : + Shape + +
    + + + shapeB + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a linear solve operation, and returns the resulting shape of the solution + +

+
+
+
+ + shapeA + + : + Shape +
+
+
+ + shapeB + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + checkCanSplit shape sizes dim + + +

+
+
+
+ Full Usage: + checkCanSplit shape sizes dim +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + sizes + + : + int[] + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + int[][] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a split operation and returns information related to the resulting shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + sizes + + : + int[] +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + int[][] +
+
+
+
+
+ +

+ + + checkCanStack shapes dim + + +

+
+
+
+ Full Usage: + checkCanStack shapes dim +
+
+ Parameters: +
    + + + shapes + + : + Shape[] + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + int * int[] * int[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a stack operation and returns information related to the resulting shape. + +

+
+
+
+ + shapes + + : + Shape[] +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + int * int[] * int[] * int[] +
+
+
+
+
+ +

+ + + checkCanTranspose shape dim0 dim1 + + +

+
+
+
+ Full Usage: + checkCanTranspose shape dim0 dim1 +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dim0 + + : + int + +
    + + + dim1 + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a transpose operation and returns information related to the resulting shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dim0 + + : + int +
+
+
+ + dim1 + + : + int +
+
+
+
+
+ +

+ + + checkCanTranspose2d dim + + +

+
+
+
+ Full Usage: + checkCanTranspose2d dim +
+
+ Parameters: +
    + + + dim + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a transpose operation. + +

+
+
+
+ + dim + + : + int +
+
+
+
+
+ +

+ + + checkCanUnsqueeze dim shape + + +

+
+
+
+ Full Usage: + checkCanUnsqueeze dim shape +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for an unsqueeze operation and returns the resulting shape. + +

+
+
+
+ + dim + + : + int +
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + checkCanUnstack shape dim + + +

+
+
+
+ Full Usage: + checkCanUnstack shape dim +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + int[] * int[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for an unstack operation and returns information related to the resulting shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + int[] * int[] * int[] +
+
+
+
+
+ +

+ + + checkCanView shape1 shape2 + + +

+
+
+
+ Full Usage: + checkCanView shape1 shape2 +
+
+ Parameters: +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shape is appropriate for a view operation. + +

+
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+
+
+ +

+ + + checkDeviceTypes deviceType1 deviceType2 + + +

+
+
+
+ Full Usage: + checkDeviceTypes deviceType1 deviceType2 +
+
+ Parameters: + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the two device types are equal. + +

+
+
+
+ + deviceType1 + + : + DeviceType +
+
+
+ + deviceType2 + + : + DeviceType +
+
+
+
+
+ +

+ + + checkDtypes dtype1 dtype2 + + +

+
+
+
+ Full Usage: + checkDtypes dtype1 dtype2 +
+
+ Parameters: +
    + + + dtype1 + + : + Dtype + +
    + + + dtype2 + + : + Dtype + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Checks if the two tensor element types are equal. + +

+
+
+
+ + dtype1 + + : + Dtype +
+
+
+ + dtype2 + + : + Dtype +
+
+
+
+
+ +

+ + + complete nelement shape + + +

+
+
+
+ Full Usage: + complete nelement shape +
+
+ Parameters: +
    + + + nelement + + : + int + +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + Shape + +
+
+
+
+
+
+ + + + + + +

+ + Completes the given shape with respect to a tensor with the given number of elements. + +

+
+
+
+ + nelement + + : + int +
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + Shape +
+
+
+
+
+ +

+ + + completeDim dims dim + + +

+
+
+
+ Full Usage: + completeDim dims dim +
+
+ Parameters: +
    + + + dims + + : + int + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Completes the given shape dimension with respect to a concrete dimension. + +

+
+
+
+ + dims + + : + int +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + completeDimUnsqueeze dims dim + + +

+
+
+
+ Full Usage: + completeDimUnsqueeze dims dim +
+
+ Parameters: +
    + + + dims + + : + int + +
    + + + dim + + : + int + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Completes the given shape dimension with respect to a concrete dimension, for the unsqueeze operation. + +

+
+
+
+ + dims + + : + int +
+
+
+ + dim + + : + int +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + completeExpand shape newShape + + +

+
+
+
+ Full Usage: + completeExpand shape newShape +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + newShape + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Completes the new shape for an expand operation based on the current shape of the tensor. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + newShape + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + completeSliceBounds shape bounds + + +

+
+
+
+ Full Usage: + completeSliceBounds shape bounds +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + bounds + + : + int[,] + +
    +
+
+ + Returns: + int[,] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + shape + + : + Shape +
+
+
+ + bounds + + : + int[,] +
+
+
+
+
+ + Returns: + + int[,] +
+
+
+
+
+ +

+ + + computeTranspose2d shape + + +

+
+
+
+ Full Usage: + computeTranspose2d shape +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Checks if the given shapes are appropriate for a transpose operation and returns information related to the resulting shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + contains bigShape smallShape + + +

+
+
+
+ Full Usage: + contains bigShape smallShape +
+
+ Parameters: +
    + + + bigShape + + : + Shape + +
    + + + smallShape + + : + Shape + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if one shape contains another. + +

+
+
+
+ + bigShape + + : + Shape +
+
+
+ + smallShape + + : + Shape +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + create xs + + +

+
+
+
+ Full Usage: + create xs +
+
+ Parameters: +
    + + + xs + + : + seq<int> + +
    +
+
+ + Returns: + int[] + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + xs + + : + seq<int> +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + dilated shape dilations + + +

+
+
+
+ Full Usage: + dilated shape dilations +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dilations + + : + int[] + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Computes the shape that results from a dilation operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dilations + + : + int[] +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + flatten startDim endDim shape + + +

+
+
+
+ Full Usage: + flatten startDim endDim shape +
+
+ Parameters: +
    + + + startDim + + : + int + +
    + + + endDim + + : + int + +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Computes the shape that results from a flatten operation. + +

+
+
+
+ + startDim + + : + int +
+
+
+ + endDim + + : + int +
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + locationToBounds shape location + + +

+
+
+
+ Full Usage: + locationToBounds shape location +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + location + + : + int[] + +
    +
+
+ + Returns: + int[,] + +
+
+
+
+
+
+ + + + + + +

+ + Converts the given location to a three-element bounds array in the context of the given shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + location + + : + int[] +
+
+
+
+
+ + Returns: + + int[,] +
+
+
+
+
+ +

+ + + nelement shape + + +

+
+
+
+ Full Usage: + nelement shape +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Gets the total number of elements in the shape. + +

+
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + resolve2dConvOutputPadding outputPadding outputPaddings + + +

+
+
+
+ Full Usage: + resolve2dConvOutputPadding outputPadding outputPaddings +
+
+ Parameters: +
    + + + outputPadding + + : + int option + +
    + + + outputPaddings + + : + 'b option + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + outputPadding + + : + int option +
+
+
+ + outputPaddings + + : + 'b option +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + resolve2dConvSizes stride strides padding paddings dilation dilations + + +

+
+
+
+ Full Usage: + resolve2dConvSizes stride strides padding paddings dilation dilations +
+
+ Parameters: +
    + + + stride + + : + int option + +
    + + + strides + + : + 'b option + +
    + + + padding + + : + int option + +
    + + + paddings + + : + 'c option + +
    + + + dilation + + : + int option + +
    + + + dilations + + : + 'd option + +
    +
+
+ + Returns: + int[] * int[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + stride + + : + int option +
+
+
+ + strides + + : + 'b option +
+
+
+ + padding + + : + int option +
+
+
+ + paddings + + : + 'c option +
+
+
+ + dilation + + : + int option +
+
+
+ + dilations + + : + 'd option +
+
+
+
+
+ + Returns: + + int[] * int[] * int[] +
+
+
+
+
+ +

+ + + resolve2dKernelSizes kernelSize kernelSizes + + +

+
+
+
+ Full Usage: + resolve2dKernelSizes kernelSize kernelSizes +
+
+ Parameters: +
    + + + kernelSize + + : + int option + +
    + + + kernelSizes + + : + 'a option + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + kernelSize + + : + int option +
+
+
+ + kernelSizes + + : + 'a option +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + resolve2dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings + + +

+
+
+
+ Full Usage: + resolve2dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings +
+
+ Parameters: +
    + + + kernelSize + + : + 'b option + +
    + + + kernelSizes + + : + 'c option + +
    + + + stride + + : + 'b option + +
    + + + strides + + : + 'd option + +
    + + + padding + + : + int option + +
    + + + paddings + + : + 'e option + +
    +
+
+ + Returns: + 'b[] * 'b[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + kernelSize + + : + 'b option +
+
+
+ + kernelSizes + + : + 'c option +
+
+
+ + stride + + : + 'b option +
+
+
+ + strides + + : + 'd option +
+
+
+ + padding + + : + int option +
+
+
+ + paddings + + : + 'e option +
+
+
+
+
+ + Returns: + + 'b[] * 'b[] * int[] +
+
+
+
+
+ +

+ + + resolve3dConvOutputPadding outputPadding outputPaddings + + +

+
+
+
+ Full Usage: + resolve3dConvOutputPadding outputPadding outputPaddings +
+
+ Parameters: +
    + + + outputPadding + + : + int option + +
    + + + outputPaddings + + : + 'b option + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + outputPadding + + : + int option +
+
+
+ + outputPaddings + + : + 'b option +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + resolve3dConvSizes stride strides padding paddings dilation dilations + + +

+
+
+
+ Full Usage: + resolve3dConvSizes stride strides padding paddings dilation dilations +
+
+ Parameters: +
    + + + stride + + : + int option + +
    + + + strides + + : + 'b option + +
    + + + padding + + : + int option + +
    + + + paddings + + : + 'c option + +
    + + + dilation + + : + int option + +
    + + + dilations + + : + 'd option + +
    +
+
+ + Returns: + int[] * int[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + stride + + : + int option +
+
+
+ + strides + + : + 'b option +
+
+
+ + padding + + : + int option +
+
+
+ + paddings + + : + 'c option +
+
+
+ + dilation + + : + int option +
+
+
+ + dilations + + : + 'd option +
+
+
+
+
+ + Returns: + + int[] * int[] * int[] +
+
+
+
+
+ +

+ + + resolve3dKernelSizes kernelSize kernelSizes + + +

+
+
+
+ Full Usage: + resolve3dKernelSizes kernelSize kernelSizes +
+
+ Parameters: +
    + + + kernelSize + + : + int option + +
    + + + kernelSizes + + : + 'a option + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + kernelSize + + : + int option +
+
+
+ + kernelSizes + + : + 'a option +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + resolve3dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings + + +

+
+
+
+ Full Usage: + resolve3dMaxPoolSizes kernelSize kernelSizes stride strides padding paddings +
+
+ Parameters: +
    + + + kernelSize + + : + 'b option + +
    + + + kernelSizes + + : + 'c option + +
    + + + stride + + : + 'b option + +
    + + + strides + + : + 'd option + +
    + + + padding + + : + int option + +
    + + + paddings + + : + 'e option + +
    +
+
+ + Returns: + 'b[] * 'b[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + kernelSize + + : + 'b option +
+
+
+ + kernelSizes + + : + 'c option +
+
+
+ + stride + + : + 'b option +
+
+
+ + strides + + : + 'd option +
+
+
+ + padding + + : + int option +
+
+
+ + paddings + + : + 'e option +
+
+
+
+
+ + Returns: + + 'b[] * 'b[] * int[] +
+
+
+
+
+ +

+ + + scalar + + +

+
+
+
+ Full Usage: + scalar +
+
+ + Returns: + Shape + +
+
+
+
+
+
+ + + + + + +

+ + The shape for a scalar value. + +

+
+
+
+ + Returns: + + Shape +
+
+
+
+
+ +

+ + + squeeze dim shape + + +

+
+
+
+ Full Usage: + squeeze dim shape +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + shape + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Computes the shape that results from a squeeze operation. + +

+
+
+
+ + dim + + : + int +
+
+
+ + shape + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + undilatedShape shape dilations + + +

+
+
+
+ Full Usage: + undilatedShape shape dilations +
+
+ Parameters: +
    + + + shape + + : + Shape + +
    + + + dilations + + : + int[] + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Computes the shape that results from an undilation operation. + +

+
+
+
+ + shape + + : + Shape +
+
+
+ + dilations + + : + int[] +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + unsqueezeAs shape1 shape2 + + +

+
+
+
+ Full Usage: + unsqueezeAs shape1 shape2 +
+
+ Parameters: +
    + + + shape1 + + : + Shape + +
    + + + shape2 + + : + Shape + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Computes the shape that results from an unsqueezeAs operation. + +

+
+
+
+ + shape1 + + : + Shape +
+
+
+ + shape2 + + : + Shape +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-shorten.html b/reference/furnace-shorten.html new file mode 100644 index 00000000..c9a2184f --- /dev/null +++ b/reference/furnace-shorten.html @@ -0,0 +1,2100 @@ + + + + + Shorten (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Shorten Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + FurnaceImage.fg f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fg f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fgh f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fgh f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fghvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fghvp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fgvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fgvp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fh f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fh f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fhvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fhvp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fj f x + + +

+
+
+
+ Full Usage: + FurnaceImage.fj f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fjvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fjvp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.fvjp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.fvjp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.g f x + + +

+
+
+
+ Full Usage: + FurnaceImage.g f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gh f x + + +

+
+
+
+ Full Usage: + FurnaceImage.gh f x +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.ghvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.ghvp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.gvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.gvp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.h f x + + +

+
+
+
+ Full Usage: + FurnaceImage.h f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.hvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.hvp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.j f x + + +

+
+
+
+ Full Usage: + FurnaceImage.j f x +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.jvp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.jvp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + FurnaceImage.vjp f x v + + +

+
+
+
+ Full Usage: + FurnaceImage.vjp f x v +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ TBD +

+
+

+ Extended Type: + FurnaceImage +

+
+
+ + f + + : + Tensor -> Tensor +
+
+
+ + x + + : + Tensor +
+
+
+ + v + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-slicingextensions.html b/reference/furnace-slicingextensions.html new file mode 100644 index 00000000..5c473db4 --- /dev/null +++ b/reference/furnace-slicingextensions.html @@ -0,0 +1,23900 @@ + + + + + SlicingExtensions (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ SlicingExtensions Module +

+ +
+
+

+ +

+
+
+
+
+

+ Type extensions +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type extension + + Description +
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0min + + : + int option + +
    + + + i0max + + : + int option + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0min + + : + int option +
+
+
+ + i0max + + : + int option +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1min + + : + int option + +
    + + + i1max + + : + int option + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1min + + : + int option +
+
+
+ + i1max + + : + int option +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2min + + : + int option + +
    + + + i2max + + : + int option + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2min + + : + int option +
+
+
+ + i2max + + : + int option +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3min + + : + int option + +
    + + + i3max + + : + int option + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3min + + : + int option +
+
+
+ + i3max + + : + int option +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4min + + : + int option + +
    + + + i4max + + : + int option + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4min + + : + int option +
+
+
+ + i4max + + : + int option +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5min + + : + int option + +
    + + + i5max + + : + int option + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5min + + : + int option +
+
+
+ + i5max + + : + int option +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + i0 + + : + int + +
    + + + i1 + + : + int + +
    + + + i2 + + : + int + +
    + + + i3 + + : + int + +
    + + + i4 + + : + int + +
    + + + i5 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+

+ Extended Type: + Tensor +

+
+
+ + i0 + + : + int +
+
+
+ + i1 + + : + int +
+
+
+ + i2 + + : + int +
+
+
+ + i3 + + : + int +
+
+
+ + i4 + + : + int +
+
+
+ + i5 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-tensor.html b/reference/furnace-tensor.html new file mode 100644 index 00000000..70791c60 --- /dev/null +++ b/reference/furnace-tensor.html @@ -0,0 +1,24405 @@ + + + + + Tensor (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Tensor Type +

+ +
+
+

+ + Represents a multi-dimensional data type containing elements of a single data type. + +

+
+
+ Example +
+

+ + A tensor can be constructed from a list or sequence using FurnaceImage.tensor

+    let t = FurnaceImage.tensor([[1.; -1.]; [1.; -1.]])
+

+
+
+
+
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.GetSlice + + +

+
+
+
+ Full Usage: + this.GetSlice +
+
+ Parameters: +
    + + + bounds + + : + int[,] + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + bounds + + : + int[,] +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.[index] + + +

+
+
+
+ Full Usage: + this.[index] +
+
+ Parameters: +
    + + + index + + : + int[] + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Get the item at the given index as a scalar tensor. +

+
+
+
+ + index + + : + int[] +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.abs () + + +

+
+
+
+ Full Usage: + this.abs () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes the element-wise absolute value of the given input tensor. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.acos () + + +

+
+
+
+ Full Usage: + this.acos () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the arccosine of the elements of input. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.add b + + +

+
+
+
+ Full Usage: + this.add b +
+
+ Parameters: +
    + + + b + + : + scalar + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Each element of the object tensor is added to the scalar b. The resulting tensor is returned. +

+
+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.add b + + +

+
+
+
+ Full Usage: + this.add b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Each element of the object tensor is added to each corresponding element of the tensor b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.addSlice (location, b) + + +

+
+
+
+ Full Usage: + this.addSlice (location, b) +
+
+ Parameters: +
    + + + location + + : + seq<int> + +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Add the given tensor as a slice at the given location. +

+
+
+
+ + location + + : + seq<int> +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.allclose (tensor, ?relativeTolerance, ?absoluteTolerance) + + +

+
+
+
+ Full Usage: + this.allclose (tensor, ?relativeTolerance, ?absoluteTolerance) +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    + + + ?relativeTolerance + + : + float + +
    + + + ?absoluteTolerance + + : + float + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if two tensors have the same shape and all corresponding elements are equal within the + given tolerances. + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+ + ?relativeTolerance + + : + float +
+
+
+ + ?absoluteTolerance + + : + float +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.ancestors () + + +

+
+
+
+ Full Usage: + this.ancestors () +
+
+ + Returns: + Tensor list * string + +
+
+
+
+
+
+ + + + + + +

+ + A debugging routine that returns the ancestors of a tensor involved in reverse-mode automatic differentiation + +

+
+
+
+ + Returns: + + Tensor list * string +
+
+
+
+
+ +

+ + + this.arangeLike (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.arangeLike (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + endVal + + : + int + +
    + + + ?startVal + + : + int + +
    + + + ?step + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor in the manner of for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + endVal + + : + int +
+
+
+ + ?startVal + + : + int +
+
+
+ + ?step + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.arangeLike (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.arangeLike (endVal, ?startVal, ?step, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + endVal + + : + float + +
    + + + ?startVal + + : + float + +
    + + + ?step + + : + float + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor in the manner of FurnaceImage.arange for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + 

+
+
+
+ + endVal + + : + float +
+
+
+ + ?startVal + + : + float +
+
+
+ + ?step + + : + float +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.argmax (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + this.argmax (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the indexes of maximum values of the primal of the tensor, reducing the given dimension. +

+
+

+ The resulting tensor does not participate in reverse or forward differentiation. It can be used as input to another operation such as FurnaceImage.gather. +

+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.argmax () + + +

+
+
+
+ Full Usage: + this.argmax () +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Gets the index of a maximum value in the tensor. + +

+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + this.argmin (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + this.argmin (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the indexes of minimum values of the primal of the tensor, reducing the given dimension. +

+
+

+ The resulting tensor does not participate in reverse or forward differentiation. It can be used as input to another operation such as FurnaceImage.gather. +

+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.argmin () + + +

+
+
+
+ Full Usage: + this.argmin () +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Gets the index of a minimum value in the tensor. + +

+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + this.asin () + + +

+
+
+
+ Full Usage: + this.asin () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the arcsine of the elements of input. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.atan () + + +

+
+
+
+ Full Usage: + this.atan () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the arctangent of the elements of input. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.backend + + +

+
+
+
+ Full Usage: + this.backend +
+
+ + Returns: + Backend + +
+
+
+
+
+
+ + + + + + +

+ + Gets the backend of the tensor + +

+
+
+
+ + Returns: + + Backend +
+
+
+
+
+ +

+ + + this.backward value + + +

+
+
+
+ Full Usage: + this.backward value +
+
+ Parameters: +
    + + + value + + : + Tensor + +
    +
+
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ See reverse +

+
+
+
+ + value + + : + Tensor +
+
+
+
+
+ +

+ + + this.bceLoss (target, ?weight, ?reduction) + + +

+
+
+
+ Full Usage: + this.bceLoss (target, ?weight, ?reduction) +
+
+ Parameters: +
    + + + target + + : + Tensor + - + The target tensor. + +
    + + + ?weight + + : + Tensor + - + A manual rescaling weight given to the loss of each batch element. + +
    + + + ?reduction + + : + string + - + Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Creates a criterion that measures the Binary Cross Entropy between the target and the output +

+
+
+
+ + target + + : + Tensor +
+
+

+ The target tensor. +

+
+
+ + ?weight + + : + Tensor +
+
+

+ A manual rescaling weight given to the loss of each batch element. +

+
+
+ + ?reduction + + : + string +
+
+

+ Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.bernoulli (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.bernoulli (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Draws binary random numbers (0 or 1) from a Bernoulli distribution +

+
+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.bfloat16 () + + +

+
+
+
+ Full Usage: + this.bfloat16 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type bfloat16 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.bool () + + +

+
+
+
+ Full Usage: + this.bool () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type bool + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.byte () + + +

+
+
+
+ Full Usage: + this.byte () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type byte + 

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.cast () + + +

+
+
+
+ Full Usage: + this.cast () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Converts the tensor to a new tensor with the given system type + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.cast dtype + + +

+
+
+
+ Full Usage: + this.cast dtype +
+
+ Parameters: +
    + + + dtype + + : + Dtype + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Converts the tensor to a new tensor with the given dtype + 

+
+
+
+ + dtype + + : + Dtype +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.ceil () + + +

+
+
+
+ Full Usage: + this.ceil () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the ceil of the elements of input, the smallest integer greater than or equal to each element. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.clamp (?low, ?high) + + +

+
+
+
+ Full Usage: + this.clamp (?low, ?high) +
+
+ Parameters: +
    + + + ?low + + : + scalar + - + The lower-bound of the range to be clamped to. + +
    + + + ?high + + : + scalar + - + The upper-bound of the range to be clamped to. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Clamp all elements in input into the range [ low..high] and return a resulting tensor +

+
+
+
+ + ?low + + : + scalar +
+
+

+ The lower-bound of the range to be clamped to. +

+
+
+ + ?high + + : + scalar +
+
+

+ The upper-bound of the range to be clamped to. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.clone () + + +

+
+
+
+ Full Usage: + this.clone () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with underlying storage copied. +

+
+

+ + This method discards differentiability and returns a constant tensor. + +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.conv1d (filters, ?stride, ?padding, ?dilation) + + +

+
+
+
+ Full Usage: + this.conv1d (filters, ?stride, ?padding, ?dilation) +
+
+ Parameters: +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit paddings on both sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D convolution over an input signal composed of several input planes +

+
+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit paddings on both sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.conv2d (filters, ?stride, ?padding, ?dilation, ?strides, ?paddings, ?dilations) + + +

+
+
+
+ Full Usage: + this.conv2d (filters, ?stride, ?padding, ?dilation, ?strides, ?paddings, ?dilations) +
+
+ Parameters: +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on corresponding sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?strides + + : + seq<int> + - + The strides of the convolving kernel. + +
    + + + ?paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    + + + ?dilations + + : + seq<int> + - + The spacings between kernel elements. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D convolution over an input signal composed of several input planes +

+
+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on corresponding sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the convolving kernel. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+ + ?dilations + + : + seq<int> +
+
+

+ The spacings between kernel elements. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.conv3d (filters, ?stride, ?padding, ?dilation, ?strides, ?paddings, ?dilations) + + +

+
+
+
+ Full Usage: + this.conv3d (filters, ?stride, ?padding, ?dilation, ?strides, ?paddings, ?dilations) +
+
+ Parameters: +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on corresponding sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?strides + + : + seq<int> + - + The strides of the convolving kernel. + +
    + + + ?paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    + + + ?dilations + + : + seq<int> + - + The spacings between kernel elements. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D convolution over an input signal composed of several input planes +

+
+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on corresponding sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the convolving kernel. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+ + ?dilations + + : + seq<int> +
+
+

+ The spacings between kernel elements. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.convTranspose1d (filters, ?stride, ?padding, ?dilation, ?outputPadding) + + +

+
+
+
+ Full Usage: + this.convTranspose1d (filters, ?stride, ?padding, ?dilation, ?outputPadding) +
+
+ Parameters: +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on both sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?outputPadding + + : + int + - + The additional size added to one side of each dimension in the output shape. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D transposed convolution operator over an input signal composed of several input planes, sometimes also called 'deconvolution'. +

+
+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on both sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?outputPadding + + : + int +
+
+

+ The additional size added to one side of each dimension in the output shape. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.convTranspose2d (filters, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) + + +

+
+
+
+ Full Usage: + this.convTranspose2d (filters, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) +
+
+ Parameters: +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on both sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?outputPadding + + : + int + - + The additional size added to one side of each dimension in the output shape. + +
    + + + ?strides + + : + seq<int> + - + The strides of the convolving kernel. + +
    + + + ?paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    + + + ?dilations + + : + seq<int> + - + The spacings between kernel elements. + +
    + + + ?outputPaddings + + : + seq<int> + - + The additional sizes added to one side of each dimension in the output shape. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D transposed convolution operator over an input signal composed of several input planes, sometimes also called 'deconvolution'. +

+
+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on both sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?outputPadding + + : + int +
+
+

+ The additional size added to one side of each dimension in the output shape. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the convolving kernel. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+ + ?dilations + + : + seq<int> +
+
+

+ The spacings between kernel elements. +

+
+
+ + ?outputPaddings + + : + seq<int> +
+
+

+ The additional sizes added to one side of each dimension in the output shape. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.convTranspose3d (filters, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) + + +

+
+
+
+ Full Usage: + this.convTranspose3d (filters, ?stride, ?padding, ?dilation, ?outputPadding, ?strides, ?paddings, ?dilations, ?outputPaddings) +
+
+ Parameters: +
    + + + filters + + : + Tensor + - + The filters. + +
    + + + ?stride + + : + int + - + The stride of the convolving kernel. + +
    + + + ?padding + + : + int + - + The implicit padding on both sides of the input. + +
    + + + ?dilation + + : + int + - + The spacing between kernel elements. + +
    + + + ?outputPadding + + : + int + - + The additional size added to one side of each dimension in the output shape. + +
    + + + ?strides + + : + seq<int> + - + The strides of the convolving kernel. + +
    + + + ?paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    + + + ?dilations + + : + seq<int> + - + The spacings between kernel elements. + +
    + + + ?outputPaddings + + : + seq<int> + - + The additional sizes added to one side of each dimension in the output shape. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D transposed convolution operator over an input signal composed of several input planes, sometimes also called 'deconvolution'. +

+
+
+
+ + filters + + : + Tensor +
+
+

+ The filters. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the convolving kernel. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit padding on both sides of the input. +

+
+
+ + ?dilation + + : + int +
+
+

+ The spacing between kernel elements. +

+
+
+ + ?outputPadding + + : + int +
+
+

+ The additional size added to one side of each dimension in the output shape. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the convolving kernel. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+ + ?dilations + + : + seq<int> +
+
+

+ The spacings between kernel elements. +

+
+
+ + ?outputPaddings + + : + seq<int> +
+
+

+ The additional sizes added to one side of each dimension in the output shape. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.corrcoef () + + +

+
+
+
+ Full Usage: + this.corrcoef () +
+
+ + Returns: + Tensor + + + The correlation coefficient matrix \(R\) is computed from the covariance + matrix + Returns a square tensor representing the correlation coefficient matrix. + Given a tensor with \(N\) variables \(X=[x_1,x_2,\ldots,x_N]\) the + \(R_{i,j}\) entry on the correlation matrix is the correlation between + \(x_i\) and \(x_j\). + +
+
+
+
+
+
+ + + + + + +

+ + Estimates the Pearson correlation coefficient matrix for the given tensor. The tensor's first + dimension should index variables and the second dimension should + index observations for each variable. + +

+
+

+ + The correlation between variables \(x\) and \(y\) is + \[cor(x,y)= \frac{\sum^{N}_{i = 1}(x_{i} - \mu_x)(y_{i} - \mu_y)}{\sigma_x \sigma_y (N ~-~1)}\] + where \(\mu_x\) and \(\mu_y\) are the sample means and \(\sigma_x\) and \(\sigma_y\) are + the sample standard deviations. + 

+
+
+ + Returns: + + Tensor +
+
+

+ + The correlation coefficient matrix \(R\) is computed from the covariance + matrix + Returns a square tensor representing the correlation coefficient matrix. + Given a tensor with \(N\) variables \(X=[x_1,x_2,\ldots,x_N]\) the + \(R_{i,j}\) entry on the correlation matrix is the correlation between + \(x_i\) and \(x_j\). + +

+
+
+
+ Example +
+

+

+ let x = FurnaceImage.tensor([-0.2678; -0.0908; -0.3766;  0.2780])
+ let y = FurnaceImage.tensor([-0.5812;  0.1535;  0.2387;  0.2350])
+ let xy = FurnaceImage.stack([x;y])
+ xy.corrcoef()
+ Evaluates to +
+ tensor([[1.0000, 0.3582],
+         [0.3582, 1.0000]])
+

+
+
+ +

+ + + this.cos () + + +

+
+
+
+ Full Usage: + this.cos () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the cosine of the elements of input +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.cosh () + + +

+
+
+
+ Full Usage: + this.cosh () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the hyperbolic cosine of the elements of input. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.cov (?correction, ?fweights, ?aweights) + + +

+
+
+
+ Full Usage: + this.cov (?correction, ?fweights, ?aweights) +
+
+ Parameters: +
    + + + ?correction + + : + int64 + - + Difference between the sample size and the sample degrees of freedom. Defaults to 1 (Bessel's correction). + +
    + + + ?fweights + + : + Tensor + - + Frequency weights represent the number of times each observation was observed. + Should be given as a tensor of integers. Defaults to no weights. + +
    + + + ?aweights + + : + Tensor + - + Relative importance weights, larger weights for observations that + should have a larger effect on the estimate. + Should be given as a tensor of floating point numbers. Defaults to no weights. + +
    +
+
+ + Returns: + Tensor + + Returns a square tensor representing the covariance matrix. + Given a tensor with \(N\) variables \(X=[x_1,x_2,\ldots,x_N]\) the + \(C_{i,j}\) entry on the covariance matrix is the covariance between + \(x_i\) and \(x_j\). + +
+
+
+
+
+
+ + + + + + +

+ + Estimates the covariance matrix of the given tensor. The tensor's first + dimension should index variables and the second dimension should + index observations for each variable. + +

+
+

+ + If no weights are given, the covariance between variables \(x\) and \(y\) is + \[cov(x,y)= \frac{\sum^{N}_{i = 1}(x_{i} - \mu_x)(y_{i} - \mu_y)}{N~-~\text{correction}}\] + where \(\mu_x\) and \(\mu_y\) are the sample means. + + If there are fweights or aweights then the covariance is + \[cov(x,y)=\frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}{\text{normalization factor}}\] + where \(w\) is either fweights or aweights if one weight type is provided. + If both weight types are provided \(w=\text{fweights}\times\text{aweights}\). + \(\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}\) + is the weighted mean of variables. + The normalization factor is \(\sum^{N}_{i=1} w_i\) if only fweights are provided or if aweights are provided and correction=0. + Otherwise if aweights \(aw\) are provided the normalization factor is + \(\sum^N_{i=1} w_i - \text{correction}\times\frac{\sum^N_{i=1} w_i aw_i}{\sum^N_{i=1} w_i}\) + +

+
+
+ + ?correction + + : + int64 +
+
+

+ Difference between the sample size and the sample degrees of freedom. Defaults to 1 (Bessel's correction). +

+
+
+ + ?fweights + + : + Tensor +
+
+

+ Frequency weights represent the number of times each observation was observed. + Should be given as a tensor of integers. Defaults to no weights. +

+
+
+ + ?aweights + + : + Tensor +
+
+

+ Relative importance weights, larger weights for observations that + should have a larger effect on the estimate. + Should be given as a tensor of floating point numbers. Defaults to no weights. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ Returns a square tensor representing the covariance matrix. + Given a tensor with \(N\) variables \(X=[x_1,x_2,\ldots,x_N]\) the + \(C_{i,j}\) entry on the covariance matrix is the covariance between + \(x_i\) and \(x_j\). + +

+
+
+
+ Example +
+

+

+ let x = FurnaceImage.tensor([0.0;3.4;5.0])
+ let y = FurnaceImage.tensor([1.0;2.3;-3.0])
+ let xy = FurnaceImage.stack([x;y])
+ xy.cov()
+ Evaluates to +
+ tensor([[ 6.5200, -4.0100],
+         [-4.0100,  7.6300]])
+

+
+
+ +

+ + + this.cpu () + + +

+
+
+
+ Full Usage: + this.cpu () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with the same contents moved to the CPU + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.crossEntropyLoss (target, ?weight, ?reduction) + + +

+
+
+
+ Full Usage: + this.crossEntropyLoss (target, ?weight, ?reduction) +
+
+ Parameters: +
    + + + target + + : + Tensor + - + The target tensor. + +
    + + + ?weight + + : + Tensor + - + A optional manual rescaling weight given to the loss of each batch element. + +
    + + + ?reduction + + : + string + - + Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ This criterion combines logsoftmax and nllLoss in a single function +

+
+
+
+ + target + + : + Tensor +
+
+

+ The target tensor. +

+
+
+ + ?weight + + : + Tensor +
+
+

+ A optional manual rescaling weight given to the loss of each batch element. +

+
+
+ + ?reduction + + : + string +
+
+

+ Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.depth + + +

+
+
+
+ Full Usage: + this.depth +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Gets the differentiation depth of the tensor + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.derivative + + +

+
+
+
+ Full Usage: + this.derivative +
+
+
+
+
+
+
+ + + + + + +

+ + Gets or sets the derivative of a tensor used in differentiation + +

+
+
+
+ +

+ + + this.derivativeDeep + + +

+
+
+
+ Full Usage: + this.derivativeDeep +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.device + + +

+
+
+
+ Full Usage: + this.device +
+
+ + Returns: + Device + +
+
+
+
+
+
+ + + + + + +

+ + Gets the device of the tensor + +

+
+
+
+ + Returns: + + Device +
+
+
+
+
+ +

+ + + this.deviceType + + +

+
+
+
+ Full Usage: + this.deviceType +
+
+ + Returns: + DeviceType + +
+
+
+
+
+
+ + + + + + +

+ + Gets the device type of the tensor + +

+
+
+
+ + Returns: + + DeviceType +
+
+
+
+
+ +

+ + + this.diagonal (?offset, ?dim1, ?dim2) + + +

+
+
+
+ Full Usage: + this.diagonal (?offset, ?dim1, ?dim2) +
+
+ Parameters: +
    + + + ?offset + + : + int + +
    + + + ?dim1 + + : + int + +
    + + + ?dim2 + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor with the diagonal elements with respect to dim1 and dim2. + The argument offset controls which diagonal to consider. + +

+
+
+
+ + ?offset + + : + int +
+
+
+ + ?dim1 + + : + int +
+
+
+ + ?dim2 + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.dilate dilations + + +

+
+
+
+ Full Usage: + this.dilate dilations +
+
+ Parameters: +
    + + + dilations + + : + seq<int> + - + The dilations to use. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Dilate the tensor in using the given dilations in each corresponding dimension. +

+
+
+
+ + dilations + + : + seq<int> +
+
+

+ The dilations to use. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.dim + + +

+
+
+
+ Full Usage: + this.dim +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Gets the number of dimensions of the tensor + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.div b + + +

+
+
+
+ Full Usage: + this.div b +
+
+ Parameters: +
    + + + b + + : + scalar + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Divides each element of the object tensor by the scalar b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.div b + + +

+
+
+
+ Full Usage: + this.div b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Divides each element of the object tensor by the corresponding element of the tensor b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.dot b + + +

+
+
+
+ Full Usage: + this.dot b +
+
+ Parameters: +
    + + + b + + : + Tensor + - + The vector to multiply this tensor by (1d-tensor). + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes the dot product (inner product) of two vector (1d-tensors). +

+
+

+ This function does not broadcast and expects this tensor to be a vector (1d-tensor). + The tensors must have the same number of elements. + +

+
+
+ + b + + : + Tensor +
+
+

+ The vector to multiply this tensor by (1d-tensor). +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.double () + + +

+
+
+
+ Full Usage: + this.double () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type float64 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.dropout ?p + + +

+
+
+
+ Full Usage: + this.dropout ?p +
+
+ Parameters: +
    + + + ?p + + : + double + - + The probability of an element to be zeroed. Default: 0.5. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution +

+
+
+
+ + ?p + + : + double +
+
+

+ The probability of an element to be zeroed. Default: 0.5. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.dropout2d ?p + + +

+
+
+
+ Full Usage: + this.dropout2d ?p +
+
+ Parameters: +
    + + + ?p + + : + double + - + The probability of an element to be zeroed. Default: 0.5. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Randomly zero out entire channels (a channel is a 2D feature map, e.g., the \(j\)-th channel of the \(i\)-th sample in the batched input is a 2D tensor \(\text{input}[i, j]\)). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution +

+
+
+
+ + ?p + + : + double +
+
+

+ The probability of an element to be zeroed. Default: 0.5. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.dropout3d ?p + + +

+
+
+
+ Full Usage: + this.dropout3d ?p +
+
+ Parameters: +
    + + + ?p + + : + double + - + The probability of an element to be zeroed. Default: 0.5. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Randomly zero out entire channels (a channel is a 3D feature map, e.g., the \(j\)-th channel of the \(i\)-th sample in the batched input is a 3D tensor \(\text{input}[i, j]\)). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution. +

+
+
+
+ + ?p + + : + double +
+
+

+ The probability of an element to be zeroed. Default: 0.5. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.dtype + + +

+
+
+
+ Full Usage: + this.dtype +
+
+ + Returns: + Dtype + +
+
+
+
+
+
+ + + + + + +

+ + Gets the element type of the tensor + +

+
+
+
+ + Returns: + + Dtype +
+
+
+
+
+ +

+ + + this.elementSize + + +

+
+
+
+ Full Usage: + this.elementSize +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Returns the size in bytes of an individual element in this tensor. Depending on dtype, backend configuration, this is not guaranteed to be correct and can behave differently in different runtime environments. + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.eq b + + +

+
+
+
+ Full Usage: + this.eq b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes element-wise \(a = b\), returning a boolean tensor containing a true at each location where the comparison is true +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.exp () + + +

+
+
+
+ Full Usage: + this.exp () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the exp function element-wise. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.expand newShape + + +

+
+
+
+ Full Usage: + this.expand newShape +
+
+ Parameters: +
    + + + newShape + + : + seq<int> + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new view of the object tensor with singleton dimensions expanded to a larger size. +

+
+

+

Passing -1 as the size for a dimension means not changing the size of that dimension.

The tensor can be also expanded to a larger number of dimensions, and the new ones will be appended + at the front. For the new dimensions, the size cannot be set to -1. +

+ Expanding a tensor does not allocate new memory, but only creates a new view on the existing tensor + where a dimension of size one is expanded to a larger size by setting the stride to 0. Any dimension + of size 1 can be expanded to an arbitrary value without allocating new memory. +

+

+
+
+ + newShape + + : + seq<int> +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.expandAs b + + +

+
+
+
+ Full Usage: + this.expandAs b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Expand this tensor to the same size as the other. +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.fanout + + +

+
+
+
+ Full Usage: + this.fanout +
+
+
+
+
+
+
+ + + + + + +

+ + Gets the fanout of a tensor used in reverse-mode differentiation + +

+
+
+
+ +

+ + + this.flatten (?startDim, ?endDim) + + +

+
+
+
+ Full Usage: + this.flatten (?startDim, ?endDim) +
+
+ Parameters: +
    + + + ?startDim + + : + int + - + The first dim to flatten. + +
    + + + ?endDim + + : + int + - + The last dim to flatten. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Flattens a contiguous range of dims in a tensor. +

+
+
+
+ + ?startDim + + : + int +
+
+

+ The first dim to flatten. +

+
+
+ + ?endDim + + : + int +
+
+

+ The last dim to flatten. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.flip dims + + +

+
+
+
+ Full Usage: + this.flip dims +
+
+ Parameters: +
    + + + dims + + : + seq<int> + - + The axis to flip on. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Reverse the order of a n-D tensor along given axis in dims +

+
+
+
+ + dims + + : + seq<int> +
+
+

+ The axis to flip on. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.float () + + +

+
+
+
+ Full Usage: + this.float () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type float64 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.float16 () + + +

+
+
+
+ Full Usage: + this.float16 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type float16 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.float32 () + + +

+
+
+
+ Full Usage: + this.float32 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type float32 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.float64 () + + +

+
+
+
+ Full Usage: + this.float64 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type float64 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.floor () + + +

+
+
+
+ Full Usage: + this.floor () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the floor of the elements of input, the largest integer less than or equal to each element. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.forwardDiff (derivative, ?nestingTag) + + +

+
+
+
+ Full Usage: + this.forwardDiff (derivative, ?nestingTag) +
+
+ Parameters: +
    + + + derivative + + : + Tensor + +
    + + + ?nestingTag + + : + uint32 + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the input tensor with added support for forward-mode automatic differentiation. + +

+
+

+ + Any tensors produced using this tensor will have attached derivatives for forward mode propagation. + The current global nesting level is used for nested differentiation. + +

+
+
+ + derivative + + : + Tensor +
+
+
+ + ?nestingTag + + : + uint32 +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.fullLike (value, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.fullLike (value, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + value + + : + scalar + +
    + + + ?shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor filled with the given scalar value for the given shape, element type and configuration, defaulting to the + shape and configuration of the input tensor. + +

+
+
+
+ + value + + : + scalar +
+
+
+ + ?shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.gather (dim, indices) + + +

+
+
+
+ Full Usage: + this.gather (dim, indices) +
+
+ Parameters: +
    + + + dim + + : + int + - + The axis along which to index. + +
    + + + indices + + : + Tensor + - + The indices of elements to gather. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Gathers values along an axis specified by dim. +

+
+
+
+ + dim + + : + int +
+
+

+ The axis along which to index. +

+
+
+ + indices + + : + Tensor +
+
+

+ The indices of elements to gather. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.ge b + + +

+
+
+
+ Full Usage: + this.ge b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes element-wise \(a \geq b\), returning a boolean tensor containing a true at each location where the comparison is true +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.gpu () + + +

+
+
+
+ Full Usage: + this.gpu () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with the same contents moved to the primary GPU device + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.gt b + + +

+
+
+
+ Full Usage: + this.gt b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes element-wise \(a > b\), returning a boolean tensor containing a true at each location where the comparison is true +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.hasinf () + + +

+
+
+
+ Full Usage: + this.hasinf () +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Gets if any value in the tensor is +/- INF. + +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.hasinfnan () + + +

+
+
+
+ Full Usage: + this.hasinfnan () +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Gets if any value in the tensor is NaN or +/- INF. + +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.hasnan () + + +

+
+
+
+ Full Usage: + this.hasnan () +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Gets if any value in the tensor is NaN. + +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.int () + + +

+
+
+
+ Full Usage: + this.int () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type int32 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.int16 () + + +

+
+
+
+ Full Usage: + this.int16 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type int16 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.int32 () + + +

+
+
+
+ Full Usage: + this.int32 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type int32 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.int64 () + + +

+
+
+
+ Full Usage: + this.int64 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type int64 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.int8 () + + +

+
+
+
+ Full Usage: + this.int8 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with each element converted to type int8 + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.isForwardDiff + + +

+
+
+
+ Full Usage: + this.isForwardDiff +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if a tensor is taking part in forward-mode differentiation + +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.isNoDiff + + +

+
+
+
+ Full Usage: + this.isNoDiff +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if a tensor is a constant, meaning that it is not taking part in forward or reverse-mode differentiation + +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.isReverseDiff + + +

+
+
+
+ Full Usage: + this.isReverseDiff +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if a tensor is taking part in reverse-mode differentiation + +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.isSameDiffType t2 + + +

+
+
+
+ Full Usage: + this.isSameDiffType t2 +
+
+ Parameters: +
    + + + t2 + + : + Tensor + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if two tensors have the same differentiation type + +

+
+
+
+ + t2 + + : + Tensor +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.isinf () + + +

+
+
+
+ Full Usage: + this.isinf () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with boolean elements representing if each element is +/-INF or not. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.isnan () + + +

+
+
+
+ Full Usage: + this.isnan () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with boolean elements representing if each element is NaN or not. Complex values are considered NaN when either their real and/or imaginary part is NaN. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.le b + + +

+
+
+
+ Full Usage: + this.le b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes element-wise \(a \leq b\), returning a boolean tensor containing a true at each location where the comparison is true +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.leakyRelu ?negativeSlope + + +

+
+
+
+ Full Usage: + this.leakyRelu ?negativeSlope +
+
+ Parameters: +
    + + + ?negativeSlope + + : + float + - + Controls the angle of the negative slope. Default: 0.01. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the leaky rectified linear unit function element-wise +

+
+

+ \[\text{leakyRelu}(x) = \max(0, x) + \text{negativeSlope} * \min(0, x)\] +

+
+
+ + ?negativeSlope + + : + float +
+
+

+ Controls the angle of the negative slope. Default: 0.01. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.like (value, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.like (value, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + value + + : + obj + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor from the .NET data in value for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + value + + : + obj +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.linspaceLike (startVal, endVal, steps, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.linspaceLike (startVal, endVal, steps, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + int + +
    + + + endVal + + : + int + +
    + + + steps + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor in the manner of for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + startVal + + : + int +
+
+
+ + endVal + + : + int +
+
+
+ + steps + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.linspaceLike (startVal, endVal, steps, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.linspaceLike (startVal, endVal, steps, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + float + +
    + + + endVal + + : + float + +
    + + + steps + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor in the manner of for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + startVal + + : + float +
+
+
+ + endVal + + : + float +
+
+
+ + steps + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.log () + + +

+
+
+
+ Full Usage: + this.log () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the natural logarithm of the elements of input. +

+
+

+ \[y_{i} = \log_{e} (x_{i})\] +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.log10 () + + +

+
+
+
+ Full Usage: + this.log10 () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the logarithm to the base 10 of the elements of input. +

+
+

+ \[y_{i} = \log_{10} (x_{i})\] +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.logsoftmax dim + + +

+
+
+
+ Full Usage: + this.logsoftmax dim +
+
+ Parameters: +
    + + + dim + + : + int + - + A dimension along which softmax will be computed. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a softmax followed by a logarithm. +

+
+
+
+ + dim + + : + int +
+
+

+ A dimension along which softmax will be computed. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.logspaceLike (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.logspaceLike (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + int + +
    + + + endVal + + : + int + +
    + + + steps + + : + int + +
    + + + ?baseVal + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor in the manner of for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + startVal + + : + int +
+
+
+ + endVal + + : + int +
+
+
+ + steps + + : + int +
+
+
+ + ?baseVal + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.logspaceLike (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.logspaceLike (startVal, endVal, steps, ?baseVal, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + startVal + + : + float + +
    + + + endVal + + : + float + +
    + + + steps + + : + int + +
    + + + ?baseVal + + : + float + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor in the manner of for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + startVal + + : + float +
+
+
+ + endVal + + : + float +
+
+
+ + steps + + : + int +
+
+
+ + ?baseVal + + : + float +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.logsumexp (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + this.logsumexp (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a logsumexp. +

+
+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.lt b + + +

+
+
+
+ Full Usage: + this.lt b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes element-wise \(a < b\), returning a boolean tensor containing a true at each location where the comparison is true +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.matmul b + + +

+
+
+
+ Full Usage: + this.matmul b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Matrix product of two tensors. +

+
+

+

+ The behavior depends on the dimensionality of the tensors as follows: +

+ If both tensors are 1-dimensional, the dot product (scalar) is returned. +

+ If both arguments are 2-dimensional, the matrix-matrix product is returned. +

+ If the first argument is 1-dimensional and the second argument is 2-dimensional, a 1 is prepended to its dimension for the purpose of the matrix multiply. After the matrix multiply, the prepended dimension is removed. +

+ If the first argument is 2-dimensional and the second argument is 1-dimensional, the matrix-vector product is returned. +

+ If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a + batched matrix multiply is returned. If the first argument is 1-dimensional, a 1 is prepended to its dimension for the + purpose of the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to + its dimension for the purpose of the batched matrix multiply and removed after. The non-matrix (i.e. batch) dimensions + are broadcasted (and thus must be broadcastable). For example, if input is a \((j \times 1 \times n \times m)\) + tensor and other is a \((k \times m \times p)\) tensor, out will be a \((j \times k \times n \times p)\) + tensor. +

+

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.max b + + +

+
+
+
+ Full Usage: + this.max b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise maximum of the elements in the two tensors. + +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.max () + + +

+
+
+
+ Full Usage: + this.max () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the maximum value of all elements in the input tensor. + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.max (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + this.max (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the maximum value along the given dimension of all elements in the input tensor. + +

+
+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.maxpool1d (kernelSize, ?stride, ?padding) + + +

+
+
+
+ Full Usage: + this.maxpool1d (kernelSize, ?stride, ?padding) +
+
+ Parameters: +
    + + + kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D max pooling over an input signal composed of several input planes. +

+
+
+
+ + kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.maxpool1di (kernelSize, ?stride, ?padding) + + +

+
+
+
+ Full Usage: + this.maxpool1di (kernelSize, ?stride, ?padding) +
+
+ Parameters: +
    + + + kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 1D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+
+
+ + kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.maxpool2d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + this.maxpool2d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSize. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D max pooling over an input signal composed of several input planes. +

+
+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSize. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.maxpool2di (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + this.maxpool2di (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSize. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 2D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSize. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.maxpool3d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + this.maxpool3d (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D max pooling over an input signal composed of several input planes. +

+
+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.maxpool3di (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) + + +

+
+
+
+ Full Usage: + this.maxpool3di (?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings) +
+
+ Parameters: +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSize. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    +
+
+ + Returns: + Tensor * Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a 3D max pooling over an input signal composed of several input planes, returning the max indices along with the outputs. +

+
+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSize. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+
+
+ + Returns: + + Tensor * Tensor +
+
+
+
+
+ +

+ + + this.maxunpool1d (indices, kernelSize, ?stride, ?padding, ?outputSize) + + +

+
+
+
+ Full Usage: + this.maxunpool1d (indices, kernelSize, ?stride, ?padding, ?outputSize) +
+
+ Parameters: +
    + + + indices + + : + Tensor + - + The indices selected by maxpool1di. + +
    + + + kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?outputSize + + : + seq<int> + - + The targeted output size. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes a partial inverse of maxpool1di +

+
+
+
+ + indices + + : + Tensor +
+
+

+ The indices selected by maxpool1di. +

+
+
+ + kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?outputSize + + : + seq<int> +
+
+

+ The targeted output size. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.maxunpool2d (indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) + + +

+
+
+
+ Full Usage: + this.maxunpool2d (indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) +
+
+ Parameters: +
    + + + indices + + : + Tensor + - + The indices selected by maxpool2di. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    + + + ?outputSize + + : + seq<int> + - + The targeted output size. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes a partial inverse of maxpool2di +

+
+
+
+ + indices + + : + Tensor +
+
+

+ The indices selected by maxpool2di. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+ + ?outputSize + + : + seq<int> +
+
+

+ The targeted output size. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.maxunpool3d (indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) + + +

+
+
+
+ Full Usage: + this.maxunpool3d (indices, ?kernelSize, ?stride, ?padding, ?kernelSizes, ?strides, ?paddings, ?outputSize) +
+
+ Parameters: +
    + + + indices + + : + Tensor + - + The indices selected by maxpool3di. + +
    + + + ?kernelSize + + : + int + - + The size of the window to take a max over. + +
    + + + ?stride + + : + int + - + The stride of the window. Default value is kernelSize. + +
    + + + ?padding + + : + int + - + The implicit zero padding to be added on both sides. + +
    + + + ?kernelSizes + + : + seq<int> + - + The sizes of the window to take a max over. + +
    + + + ?strides + + : + seq<int> + - + The strides of the window. Default value is kernelSizes. + +
    + + + ?paddings + + : + seq<int> + - + The implicit zero paddings to be added on corresponding sides. + +
    + + + ?outputSize + + : + seq<int> + - + The targeted output size. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes a partial inverse of maxpool3di +

+
+
+
+ + indices + + : + Tensor +
+
+

+ The indices selected by maxpool3di. +

+
+
+ + ?kernelSize + + : + int +
+
+

+ The size of the window to take a max over. +

+
+
+ + ?stride + + : + int +
+
+

+ The stride of the window. Default value is kernelSize. +

+
+
+ + ?padding + + : + int +
+
+

+ The implicit zero padding to be added on both sides. +

+
+
+ + ?kernelSizes + + : + seq<int> +
+
+

+ The sizes of the window to take a max over. +

+
+
+ + ?strides + + : + seq<int> +
+
+

+ The strides of the window. Default value is kernelSizes. +

+
+
+ + ?paddings + + : + seq<int> +
+
+

+ The implicit zero paddings to be added on corresponding sides. +

+
+
+ + ?outputSize + + : + seq<int> +
+
+

+ The targeted output size. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mean (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + this.mean (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the mean value of each row of the input tensor in the given dimension dim. +

+
+

+ If keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension. +

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mean () + + +

+
+
+
+ Full Usage: + this.mean () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the mean value of all elements in the input tensor +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.memorySize + + +

+
+
+
+ Full Usage: + this.memorySize +
+
+ + Returns: + int64 + +
+
+
+
+
+
+ + + + + + +

+ + Returns the size in bytes of the total memory used by this tensor. Depending on dtype, backend configuration, this is not guaranteed to be correct and can behave differently in different runtime environments. + +

+
+
+
+ + Returns: + + int64 +
+
+
+
+
+ +

+ + + this.min b + + +

+
+
+
+ Full Usage: + this.min b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the element-wise minimum of the elements in the two tensors. + +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.min () + + +

+
+
+
+ Full Usage: + this.min () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the minimum value of all elements in the input tensor. + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.min (dim, ?keepDim) + + +

+
+
+
+ Full Usage: + this.min (dim, ?keepDim) +
+
+ Parameters: +
    + + + dim + + : + int + +
    + + + ?keepDim + + : + bool + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the minimum value along the given dimension of all elements in the input tensor. + +

+
+
+
+ + dim + + : + int +
+
+
+ + ?keepDim + + : + bool +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.move (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.move (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with the same contents moved to the given configuration + +

+
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.move device + + +

+
+
+
+ Full Usage: + this.move device +
+
+ Parameters: +
    + + + device + + : + Device + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with the same contents moved to the given device + +

+
+
+
+ + device + + : + Device +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.move backend + + +

+
+
+
+ Full Usage: + this.move backend +
+
+ Parameters: +
    + + + backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with the same contents moved to the given backend + +

+
+
+
+ + backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mseLoss (target, ?reduction) + + +

+
+
+
+ Full Usage: + this.mseLoss (target, ?reduction) +
+
+ Parameters: +
    + + + target + + : + Tensor + - + The target tensor. + +
    + + + ?reduction + + : + string + - + Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Creates a criterion that measures the mean squared error (squared L2 norm) between each element in the input and the target. +

+
+
+
+ + target + + : + Tensor +
+
+

+ The target tensor. +

+
+
+ + ?reduction + + : + string +
+
+

+ Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mul b + + +

+
+
+
+ Full Usage: + this.mul b +
+
+ Parameters: +
    + + + b + + : + scalar + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Multiplies each element of the object tensor by the scalar b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.mul b + + +

+
+
+
+ Full Usage: + this.mul b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Multiplies each element of the object tensor by the corresponding element of the tensor b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.multinomial (numSamples, ?normalize, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.multinomial (numSamples, ?normalize, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + numSamples + + : + int + - + The number of samples to draw. + +
    + + + ?normalize + + : + bool + - + Indicates whether the probabilities should first be normalized by their sum. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor where each row contains numSamples indices sampled from the multinomial probability distribution located in the corresponding row of tensor input. +

+
+
+
+ + numSamples + + : + int +
+
+

+ The number of samples to draw. +

+
+
+ + ?normalize + + : + bool +
+
+

+ Indicates whether the probabilities should first be normalized by their sum. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.ne b + + +

+
+
+
+ Full Usage: + this.ne b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Computes element-wise \(a \neq b\), returning a boolean tensor containing a true at each location where the comparison is true +

+
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.neg () + + +

+
+
+
+ Full Usage: + this.neg () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the negative of the elements of the object tensor. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.nelement + + +

+
+
+
+ Full Usage: + this.nelement +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Gets the number of elements in the tensor + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.nestingTag + + +

+
+
+
+ Full Usage: + this.nestingTag +
+
+ + Returns: + uint32 + +
+
+
+
+
+
+ + + + + + +

+ + Gets the differentiation nesting tag of the tensor + +

+
+
+
+ + Returns: + + uint32 +
+
+
+
+
+ +

+ + + this.nllLoss (target, ?weight, ?reduction) + + +

+
+
+
+ Full Usage: + this.nllLoss (target, ?weight, ?reduction) +
+
+ Parameters: +
    + + + target + + : + Tensor + - + The target tensor. + +
    + + + ?weight + + : + Tensor + - + An optional manual rescaling weight given to the loss of each batch element. + +
    + + + ?reduction + + : + string + - + Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ The negative log likelihood loss. +

+
+
+
+ + target + + : + Tensor +
+
+

+ The target tensor. +

+
+
+ + ?weight + + : + Tensor +
+
+

+ An optional manual rescaling weight given to the loss of each batch element. +

+
+
+ + ?reduction + + : + string +
+
+

+ Optionally specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.noDiff () + + +

+
+
+
+ Full Usage: + this.noDiff () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the input tensor but with any support for automatic differentiation removed. + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.normalize () + + +

+
+
+
+ Full Usage: + this.normalize () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the tensor after min-max scaling + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.oneLike (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.oneLike (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a scalar '1' tensor for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.onehotLike (length, hot, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.onehotLike (length, hot, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + length + + : + int + +
    + + + hot + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a tensor in the manner of for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + length + + : + int +
+
+
+ + hot + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.onesLike (?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.onesLike (?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor filled with '1' values for the given shape, element type and configuration, defaulting to the + shape and configuration of the input tensor. + +

+
+
+
+ + ?shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.pad paddings + + +

+
+
+
+ Full Usage: + this.pad paddings +
+
+ Parameters: +
    + + + paddings + + : + seq<int> + - + The implicit paddings on corresponding sides of the input. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Add zero padding to each side of a tensor +

+
+
+
+ + paddings + + : + seq<int> +
+
+

+ The implicit paddings on corresponding sides of the input. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.parentOp + + +

+
+
+
+ Full Usage: + this.parentOp +
+
+ + Returns: + TensorOp + +
+
+
+
+
+
+ + + + + + +

+ + Gets the parent operation of a tensor used in reverse-mode differentiation + +

+
+
+
+ + Returns: + + TensorOp +
+
+
+
+
+ +

+ + + this.permute permutation + + +

+
+
+
+ Full Usage: + this.permute permutation +
+
+ Parameters: +
    + + + permutation + + : + seq<int> + - + The desired ordering of dimensions. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the original tensor with its dimensions permuted. +

+
+
+
+ + permutation + + : + seq<int> +
+
+

+ The desired ordering of dimensions. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.pow b + + +

+
+
+
+ Full Usage: + this.pow b +
+
+ Parameters: +
    + + + b + + : + scalar + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises each element of the self tensor to the power of the scalar b. The resulting tensor is returned. +

+
+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.pow b + + +

+
+
+
+ Full Usage: + this.pow b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises each element of the self tensor to the power of each corresponding element of the tensor b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.primal + + +

+
+
+
+ Full Usage: + this.primal +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets the value of the tensor ignoring its first derivative + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.primalDeep + + +

+
+
+
+ Full Usage: + this.primalDeep +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets the value of the tensor ignoring all its derivatives + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.primalRaw + + +

+
+
+
+ Full Usage: + this.primalRaw +
+
+ + Returns: + RawTensor + +
+
+
+
+
+
+ + + + + + +

+ + Gets the raw value of the tensor ignoring all its derivatives + +

+
+
+
+ + Returns: + + RawTensor +
+
+
+
+
+ +

+ + + this.randLike (?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.randLike (?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with random values drawn from the uniform distribution [0,1) for the + given shape, element type and configuration, defaulting to the shape and configuration of the input tensor. + +

+
+
+
+ + ?shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.randintLike (low, high, ?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.randintLike (low, high, ?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + low + + : + int + +
    + + + high + + : + int + +
    + + + ?shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with random integer values drawn from the given range, for the + given shape, element type and configuration, defaulting to the shape and configuration of the input tensor. + +

+
+
+
+ + low + + : + int +
+
+
+ + high + + : + int +
+
+
+ + ?shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.randnLike (?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.randnLike (?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor with random values drawn from the standard normal distribution, for the + given shape, element type and configuration, defaulting to the shape and configuration of the input tensor. + +

+
+
+
+ + ?shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.relu () + + +

+
+
+
+ Full Usage: + this.relu () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the rectified linear unit function element-wise. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.repeat (dim, times) + + +

+
+
+
+ Full Usage: + this.repeat (dim, times) +
+
+ Parameters: +
    + + + dim + + : + int + - + The dimension along which to repeat values. + +
    + + + times + + : + int + - + The number of repetitions for each element. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Repeat elements of a tensor +

+
+
+
+ + dim + + : + int +
+
+

+ The dimension along which to repeat values. +

+
+
+ + times + + : + int +
+
+

+ The number of repetitions for each element. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.reverse (?value, ?zeroDerivatives) + + +

+
+
+
+ Full Usage: + this.reverse (?value, ?zeroDerivatives) +
+
+ Parameters: +
    + + + ?value + + : + Tensor + - + The derivative value to propagate backwards. Should have the same shape with this tensor. + +
    + + + ?zeroDerivatives + + : + bool + - + Indicates whether any existing derivatives in the computation graph (for example from a previous reverse propagation that was executed) should be zeroed or not before starting this propagation. Default: true + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Propagate the reverse-mode derivative backwards in the computation graph, starting from this tensor. +

+
+
+
+ + ?value + + : + Tensor +
+
+

+ The derivative value to propagate backwards. Should have the same shape with this tensor. +

+
+
+ + ?zeroDerivatives + + : + bool +
+
+

+ Indicates whether any existing derivatives in the computation graph (for example from a previous reverse propagation that was executed) should be zeroed or not before starting this propagation. Default: true +

+
+
+
+
+ +

+ + + this.reverseDiff (?derivative, ?nestingTag) + + +

+
+
+
+ Full Usage: + this.reverseDiff (?derivative, ?nestingTag) +
+
+ Parameters: +
    + + + ?derivative + + : + Tensor + - + The derivative (adjoint) to assign to the new reverse-mode tensor. Defaults to an empty placeholder tensor. + +
    + + + ?nestingTag + + : + uint32 + - + The level nestingTag for nested differentiation. Defaults to the current global nesting level + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the input tensor with added support for reverse-mode automatic differentiation. + +

+
+

+ + Any tensors produced using this tensor will also support reverse-mode propagation. After the completion + of the corresponding reverse operation on the overall result tensor, the computed derivative + will be available. + +

+
+
+ + ?derivative + + : + Tensor +
+
+

+ The derivative (adjoint) to assign to the new reverse-mode tensor. Defaults to an empty placeholder tensor. +

+
+
+ + ?nestingTag + + : + uint32 +
+
+

+ The level nestingTag for nested differentiation. Defaults to the current global nesting level +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.reversePush value + + +

+
+
+
+ Full Usage: + this.reversePush value +
+
+ Parameters: +
    + + + value + + : + Tensor + - + The value to apply. + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Push the given value as part of the reverse-mode computation at the given output tensor. +

+
+
+
+ + value + + : + Tensor +
+
+

+ The value to apply. +

+
+
+
+
+ +

+ + + this.reverseReset zeroDerivatives + + +

+
+
+
+ Full Usage: + this.reverseReset zeroDerivatives +
+
+ Parameters: +
    + + + zeroDerivatives + + : + bool + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Reset the reverse mode computation graph associated with the given output tensor. +

+
+
+
+ + zeroDerivatives + + : + bool +
+
+
+
+
+ +

+ + + this.round () + + +

+
+
+
+ Full Usage: + this.round () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with each of the elements of input rounded to the closest integer. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.safelog ?epsilon + + +

+
+
+
+ Full Usage: + this.safelog ?epsilon +
+
+ Parameters: +
    + + + ?epsilon + + : + float + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the logarithm of the tensor after clamping the tensor so that all its elements are greater than epsilon. This is to avoid a -inf result for elements equal to zero. +

+
+
+
+ + ?epsilon + + : + float +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.save fileName + + +

+
+
+
+ Full Usage: + this.save fileName +
+
+ Parameters: +
    + + + fileName + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ Saves the tensor to the given file using a bespoke binary format. +

+
+

+ + The binary format records the elements, backend, element type and shape. It does not record the device. + The format used may change from version to version of Furnace. + +

+
+
+ + fileName + + : + string +
+
+
+
+
+ +

+ + + this.scalarLike (scalar, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.scalarLike (scalar, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + scalar + + : + scalar + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new scalar tensor for the given shape, element type and configuration, defaulting to the + shape and configuration of the input tensor. + +

+
+
+
+ + scalar + + : + scalar +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.scatter (dim, indices, destinationShape) + + +

+
+
+
+ Full Usage: + this.scatter (dim, indices, destinationShape) +
+
+ Parameters: +
    + + + dim + + : + int + - + The axis along which to index. + +
    + + + indices + + : + Tensor + - + The indices of elements to gather. + +
    + + + destinationShape + + : + seq<int> + - + The destination shape. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Scatter values along an axis specified by dim. +

+
+
+
+ + dim + + : + int +
+
+

+ The axis along which to index. +

+
+
+ + indices + + : + Tensor +
+
+

+ The indices of elements to gather. +

+
+
+ + destinationShape + + : + seq<int> +
+
+

+ The destination shape. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.shape + + +

+
+
+
+ Full Usage: + this.shape +
+
+ + Returns: + Shape + +
+
+
+
+
+
+ + + + + + +

+ + Gets the shape of the tensor + +

+
+
+
+ + Returns: + + Shape +
+
+
+
+
+ +

+ + + this.sigmoid () + + +

+
+
+
+ Full Usage: + this.sigmoid () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the sigmoid element-wise function +

+
+

+ \[\text{sigmoid}(x) = \frac{1}{1 + \exp(-x)}\] +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sign () + + +

+
+
+
+ Full Usage: + this.sign () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the signs of the elements of input. +

+
+

+ The tensor will have the same element type as the input tensor. +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sin () + + +

+
+
+
+ Full Usage: + this.sin () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the sine of the elements of input +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sinh () + + +

+
+
+
+ Full Usage: + this.sinh () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the hyperbolic sine of the elements of input. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.softmax dim + + +

+
+
+
+ Full Usage: + this.softmax dim +
+
+ Parameters: +
    + + + dim + + : + int + - + A dimension along which softmax will be computed. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies a softmax function. +

+
+

+ Softmax is defined as: \text{softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}. +

+
+
+ + dim + + : + int +
+
+

+ A dimension along which softmax will be computed. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.softplus () + + +

+
+
+
+ Full Usage: + this.softplus () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Applies the softplus function element-wise. +

+
+

+ \[\text{softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))\] +

+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.split (sizes, ?dim) + + +

+
+
+
+ Full Usage: + this.split (sizes, ?dim) +
+
+ Parameters: +
    + + + sizes + + : + seq<int> + - + List of sizes for each chunk + +
    + + + ?dim + + : + int + - + The dimension along which to split the tensor, defaults to 0. + +
    +
+
+ + Returns: + Tensor[] + +
+
+
+
+
+
+ + + + + + +

+ Splits the tensor into chunks. Each chunk is a view of the original tensor. +

+
+
+
+ + sizes + + : + seq<int> +
+
+

+ List of sizes for each chunk +

+
+
+ + ?dim + + : + int +
+
+

+ The dimension along which to split the tensor, defaults to 0. +

+
+
+
+
+ + Returns: + + Tensor[] +
+
+
+
+
+ +

+ + + this.sqrt () + + +

+
+
+
+ Full Usage: + this.sqrt () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the square-root of the elements of input. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.squeeze ?dim + + +

+
+
+
+ Full Usage: + this.squeeze ?dim +
+
+ Parameters: +
    + + + ?dim + + : + int + - + If given, the input will be squeezed only in this dimension. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor with all the dimensions of input of size 1 removed. +

+
+

+ If the tensor has a batch dimension of size 1, then squeeze(input) will also remove the batch dimension, which can lead to unexpected errors. +

+
+
+ + ?dim + + : + int +
+
+

+ If given, the input will be squeezed only in this dimension. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.standardize () + + +

+
+
+
+ Full Usage: + this.standardize () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns the tensor after standardization (z-score normalization) + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.std ?unbiased + + +

+
+
+
+ Full Usage: + this.std ?unbiased +
+
+ Parameters: +
    + + + ?unbiased + + : + bool + - + Whether to use the unbiased estimation or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the standard deviation of all elements in the input tensor. +

+
+

+ If unbiased is False, then the standard deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used. +

+
+
+ + ?unbiased + + : + bool +
+
+

+ Whether to use the unbiased estimation or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.std (dim, ?keepDim, ?unbiased) + + +

+
+
+
+ Full Usage: + this.std (dim, ?keepDim, ?unbiased) +
+
+ Parameters: +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    + + + ?unbiased + + : + bool + - + Whether to use the unbiased estimation or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the standard deviation of each row of the input tensor in the given dimension dim. +

+
+

+

If keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension(s).

If unbiased is False, then the standard deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

+

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+ + ?unbiased + + : + bool +
+
+

+ Whether to use the unbiased estimation or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sub b + + +

+
+
+
+ Full Usage: + this.sub b +
+
+ Parameters: +
    + + + b + + : + scalar + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Subtracts the scalar b from the corresponding element of the object tensor. The resulting tensor is returned. +

+
+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sub b + + +

+
+
+
+ Full Usage: + this.sub b +
+
+ Parameters: +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Subtracts each element of the tensor b from the corresponding element of the object tensor. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sum (dim, ?keepDim, ?dtype) + + +

+
+
+
+ Full Usage: + this.sum (dim, ?keepDim, ?dtype) +
+
+ Parameters: +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    + + + ?dtype + + : + Dtype + - + The desired data type of returned tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the sum of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. +

+
+

+ If keepdim is true, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension. +

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired data type of returned tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sum ?dtype + + +

+
+
+
+ Full Usage: + this.sum ?dtype +
+
+ Parameters: +
    + + + ?dtype + + : + Dtype + - + The desired data type of returned tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the sum of all elements in the input tensor. +

+
+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired data type of returned tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.sumToSize (newShape, ?dtype) + + +

+
+
+
+ Full Usage: + this.sumToSize (newShape, ?dtype) +
+
+ Parameters: +
    + + + newShape + + : + int[] + +
    + + + ?dtype + + : + Dtype + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Sum this tensor to size newShape, which must be broadcastable to this tensor size. +

+
+
+
+ + newShape + + : + int[] +
+
+
+ + ?dtype + + : + Dtype +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.summary () + + +

+
+
+
+ Full Usage: + this.summary () +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Returns a string summarising the tensor + +

+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + this.tan () + + +

+
+
+
+ Full Usage: + this.tan () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the tangent of the elements of input +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.tanh () + + +

+
+
+
+ Full Usage: + this.tanh () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the hyperbolic tangent of the elements of input. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.toArray () + + +

+
+
+
+ Full Usage: + this.toArray () +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ + Returns the value of a (non-scalar) tensor as an array + +

+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + this.toArray1D () + + +

+
+
+
+ Full Usage: + this.toArray1D () +
+
+ + Returns: + 'T[] + +
+
+
+
+
+
+ + + + + + +

+ + Returns the value of a 1D tensor as a 1D array + +

+
+
+
+ + Returns: + + 'T[] +
+
+
+
+
+ +

+ + + this.toArray2D () + + +

+
+
+
+ Full Usage: + this.toArray2D () +
+
+ + Returns: + 'T[,] + +
+
+
+
+
+
+ + + + + + +

+ + Returns the value of a 2D tensor as a 2D array + +

+
+
+
+ + Returns: + + 'T[,] +
+
+
+
+
+ +

+ + + this.toArray3D () + + +

+
+
+
+ Full Usage: + this.toArray3D () +
+
+ + Returns: + 'T[,,] + +
+
+
+
+
+
+ + + + + + +

+ + Returns the value of a 3D tensor as a 3D array + +

+
+
+
+ + Returns: + + 'T[,,] +
+
+
+
+
+ +

+ + + this.toArray4D () + + +

+
+
+
+ Full Usage: + this.toArray4D () +
+
+ + Returns: + 'T[,,,] + +
+
+
+
+
+
+ + + + + + +

+ + Returns the value of a 4D tensor as a 4D array + +

+
+
+
+ + Returns: + + 'T[,,,] +
+
+
+
+
+ +

+ + + this.toArray5D () + + +

+
+
+
+ Full Usage: + this.toArray5D () +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ + Returns the value of a 5D tensor as a 5D array + +

+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + this.toArray6D () + + +

+
+
+
+ Full Usage: + this.toArray6D () +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ + Returns the value of a 6D tensor as a 6D array + +

+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + this.toBool () + + +

+
+
+
+ Full Usage: + this.toBool () +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a boolean value + +

+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + this.toByte () + + +

+
+
+
+ Full Usage: + this.toByte () +
+
+ + Returns: + byte + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a byte value + +

+
+
+
+ + Returns: + + byte +
+
+
+
+
+ +

+ + + this.toDouble () + + +

+
+
+
+ Full Usage: + this.toDouble () +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a float64 value + +

+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + this.toImage (?pixelMin, ?pixelMax, ?normalize, ?gridCols) + + +

+
+
+
+ Full Usage: + this.toImage (?pixelMin, ?pixelMax, ?normalize, ?gridCols) +
+
+ Parameters: +
    + + + ?pixelMin + + : + double + +
    + + + ?pixelMax + + : + double + +
    + + + ?normalize + + : + bool + +
    + + + ?gridCols + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Convert tensor to an image tensor with shape Channels x Height x Width +

+
+
+
+ + ?pixelMin + + : + double +
+
+
+ + ?pixelMax + + : + double +
+
+
+ + ?normalize + + : + bool +
+
+
+ + ?gridCols + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.toImageString (?pixelMin, ?pixelMax, ?normalize, ?gridCols, ?asciiPalette) + + +

+
+
+
+ Full Usage: + this.toImageString (?pixelMin, ?pixelMax, ?normalize, ?gridCols, ?asciiPalette) +
+
+ Parameters: +
    + + + ?pixelMin + + : + double + +
    + + + ?pixelMax + + : + double + +
    + + + ?normalize + + : + bool + +
    + + + ?gridCols + + : + int + +
    + + + ?asciiPalette + + : + string + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ Convert tensor to a grayscale image tensor and return a string representation approximating grayscale values +

+
+
+
+ + ?pixelMin + + : + double +
+
+
+ + ?pixelMax + + : + double +
+
+
+ + ?normalize + + : + bool +
+
+
+ + ?gridCols + + : + int +
+
+
+ + ?asciiPalette + + : + string +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + this.toInt16 () + + +

+
+
+
+ Full Usage: + this.toInt16 () +
+
+ + Returns: + int16 + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to an int16 value + +

+
+
+
+ + Returns: + + int16 +
+
+
+
+
+ +

+ + + this.toInt32 () + + +

+
+
+
+ Full Usage: + this.toInt32 () +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to an int32 value + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + this.toInt64 () + + +

+
+
+
+ Full Usage: + this.toInt64 () +
+
+ + Returns: + int64 + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to an int64 value + +

+
+
+
+ + Returns: + + int64 +
+
+
+
+
+ +

+ + + this.toSByte () + + +

+
+
+
+ Full Usage: + this.toSByte () +
+
+ + Returns: + sbyte + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a signed byte value + +

+
+
+
+ + Returns: + + sbyte +
+
+
+
+
+ +

+ + + this.toScalar () + + +

+
+
+
+ Full Usage: + this.toScalar () +
+
+ + Returns: + scalar + +
+
+
+
+
+
+ + + + + + +

+ + Returns the value of a scalar tensor as an object + +

+
+
+
+ + Returns: + + scalar +
+
+
+
+
+ +

+ + + this.toSingle () + + +

+
+
+
+ Full Usage: + this.toSingle () +
+
+ + Returns: + float32 + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a float32 value + +

+
+
+
+ + Returns: + + float32 +
+
+
+
+
+ +

+ + + this.trace () + + +

+
+
+
+ Full Usage: + this.trace () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the sum of the elements of the diagonal of the input 2-D matrix. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.transpose () + + +

+
+
+
+ Full Usage: + this.transpose () +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor that is a transposed version of input with dimensions 0 and 1 swapped. +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.transpose (dim0, dim1) + + +

+
+
+
+ Full Usage: + this.transpose (dim0, dim1) +
+
+ Parameters: +
    + + + dim0 + + : + int + - + The first dimension to be transposed. + +
    + + + dim1 + + : + int + - + The second dimension to be transposed. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a tensor that is a transposed version of input. The given dimensions dim0 and dim1 are swapped. +

+
+
+
+ + dim0 + + : + int +
+
+

+ The first dimension to be transposed. +

+
+
+ + dim1 + + : + int +
+
+

+ The second dimension to be transposed. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.undilate dilations + + +

+
+
+
+ Full Usage: + this.undilate dilations +
+
+ Parameters: +
    + + + dilations + + : + seq<int> + - + The dilations to use. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Reverse the dilation of the tensor in using the given dilations in each corresponding dimension. +

+
+
+
+ + dilations + + : + seq<int> +
+
+

+ The dilations to use. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.unflatten (dim, unflattenedShape) + + +

+
+
+
+ Full Usage: + this.unflatten (dim, unflattenedShape) +
+
+ Parameters: +
    + + + dim + + : + int + - + The dimension to unflatten. + +
    + + + unflattenedShape + + : + seq&lt;int&gt; + - + New shape of the unflattened dimension. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Unflattens a tensor dimension by expanding it to the given shape. +

+
+
+
+ + dim + + : + int +
+
+

+ The dimension to unflatten. +

+
+
+ + unflattenedShape + + : + seq<int> +
+
+

+ New shape of the unflattened dimension. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.unsqueeze dim + + +

+
+
+
+ Full Usage: + this.unsqueeze dim +
+
+ Parameters: +
    + + + dim + + : + int + - + The index at which to insert the singleton dimension. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with a dimension of size one inserted at the specified position +

+
+
+
+ + dim + + : + int +
+
+

+ The index at which to insert the singleton dimension. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.unsqueezeAs other + + +

+
+
+
+ Full Usage: + this.unsqueezeAs other +
+
+ Parameters: +
    + + + other + + : + Tensor + - + The other tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with dimensions of size one appended to the end until the number of dimensions is the same as the other tensor. +

+
+
+
+ + other + + : + Tensor +
+
+

+ The other tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.unstack ?dim + + +

+
+
+
+ Full Usage: + this.unstack ?dim +
+
+ Parameters: +
    + + + ?dim + + : + int + - + The dimension to remove, defaults to 0. + +
    +
+
+ + Returns: + Tensor[] + + Returns an array of all slices along a given dimension. +
+
+
+
+
+
+ + + + + + +

+ Removes a tensor dimension. +

+
+
+
+ + ?dim + + : + int +
+
+

+ The dimension to remove, defaults to 0. +

+
+
+
+
+ + Returns: + + Tensor[] +
+
+

+ Returns an array of all slices along a given dimension. +

+
+
+
+
+ +

+ + + this.var (dim, ?keepDim, ?unbiased) + + +

+
+
+
+ Full Usage: + this.var (dim, ?keepDim, ?unbiased) +
+
+ Parameters: +
    + + + dim + + : + int + - + The dimension to reduce. + +
    + + + ?keepDim + + : + bool + - + Whether the output tensor has dim retained or not. + +
    + + + ?unbiased + + : + bool + - + Whether to use the unbiased estimation or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the variance of each row of the input tensor in the given dimension dim. +

+
+

+

If keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension(s).

If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

+

+
+
+ + dim + + : + int +
+
+

+ The dimension to reduce. +

+
+
+ + ?keepDim + + : + bool +
+
+

+ Whether the output tensor has dim retained or not. +

+
+
+ + ?unbiased + + : + bool +
+
+

+ Whether to use the unbiased estimation or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.var ?unbiased + + +

+
+
+
+ Full Usage: + this.var ?unbiased +
+
+ Parameters: +
    + + + ?unbiased + + : + bool + - + Whether to use the unbiased estimation or not. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns the variance of all elements in the input tensor. +

+
+

+ If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used. +

+
+
+ + ?unbiased + + : + bool +
+
+

+ Whether to use the unbiased estimation or not. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.view shape + + +

+
+
+
+ Full Usage: + this.view shape +
+
+ Parameters: +
    + + + shape + + : + int + - + the desired shape + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the same data as the object tensor but of a different shape. +

+
+

+ + The returned tensor shares the same data and must have the same number of elements, but may have a different size. + For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension, + or only span across original dimensions \(d, d+1, \dots, d+k\) that satisfy the following contiguity-like condition that + \(\forall i = d, \dots, d+k-1,\) \[\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]\] + +

+
+
+ + shape + + : + int +
+
+

+ the desired shape +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.view shape + + +

+
+
+
+ Full Usage: + this.view shape +
+
+ Parameters: +
    + + + shape + + : + seq<int> + - + The desired shape of returned tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the same data as the self tensor but of a different shape. +

+
+

+ + The returned tensor shares the same data and must have the same number of elements, but may have a different size. + For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension, + or only span across original dimensions \(d, d+1, \dots, d+k\) that satisfy the following contiguity-like condition that + \(\forall i = d, \dots, d+k-1,\) \[\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]\] + +

+
+
+ + shape + + : + seq<int> +
+
+

+ The desired shape of returned tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.viewAs other + + +

+
+
+
+ Full Usage: + this.viewAs other +
+
+ Parameters: +
    + + + other + + : + Tensor + - + The result tensor has the same size as other. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ View this tensor as the same size as other. +

+
+

+ The returned tensor shares the same data and must have the same number of elements, but may have a different size. For a tensor to be viewed, the new view size must be compatible with its original size. + The returned tensor shares the same data and must have the same number of elements, but may have a different size. + For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension, + or only span across original dimensions \(d, d+1, \dots, d+k\) that satisfy the following contiguity-like condition that + \(\forall i = d, \dots, d+k-1,\) \[\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]\] + +

+
+
+ + other + + : + Tensor +
+
+

+ The result tensor has the same size as other. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.zeroLike (?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.zeroLike (?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a scalar '0' tensor for the given element type and configuration, defaulting to + the element type and configuration of the input tensor. + +

+
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + this.zerosLike (?shape, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + this.zerosLike (?shape, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + ?shape + + : + seq<int> + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Returns a new tensor filled with '0' values for the given shape, element type and configuration, defaulting to the + shape and configuration of the input tensor. + +

+
+
+
+ + ?shape + + : + seq<int> +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + a * b + + +

+
+
+
+ Full Usage: + a * b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Multiplies the scalar a by each element of the tensor b. The resulting tensor is returned. +

+
+
+
+ + a + + : + scalar +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a * b + + +

+
+
+
+ Full Usage: + a * b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Multiplies each element of the tensor a by the scalar b. The resulting tensor is returned. +

+
+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a * b + + +

+
+
+
+ Full Usage: + a * b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Multiplies each element of the tensor a by the corresponding element of the tensor b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a + b + + +

+
+
+
+ Full Usage: + a + b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ The scalar a is added to each element of the tensor b. The resulting tensor is returned. +

+
+
+
+ + a + + : + scalar +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a + b + + +

+
+
+
+ Full Usage: + a + b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Each element of the tensor a is added to the scalar b. The resulting tensor is returned. +

+
+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a + b + + +

+
+
+
+ Full Usage: + a + b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Each element of the tensor a is added to each corresponding element of the tensor b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a - b + + +

+
+
+
+ Full Usage: + a - b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Subtracts each element of the tensor b from the scalar a. The resulting tensor is returned. +

+
+
+
+ + a + + : + scalar +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a - b + + +

+
+
+
+ Full Usage: + a - b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Subtracts the scalar b from the corresponding element of the tensor a. The resulting tensor is returned. +

+
+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a - b + + +

+
+
+
+ Full Usage: + a - b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Subtracts each element of the tensor b from the corresponding element of the tensor a. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + t --> f + + +

+
+
+
+ Full Usage: + t --> f +
+
+ Parameters: +
    + + + t + + : + Tensor + +
    + + + f + + : + Tensor -> ^a + +
    +
+
+ + Returns: + ^a + +
+ Modifiers: + inline +
+ Type parameters: + ^a +
+
+
+
+
+ + + + + + +

+ Pipeline the tensor into a function. +

+
+
+
+ + t + + : + Tensor +
+
+
+ + f + + : + Tensor -> ^a +
+
+
+
+
+ + Returns: + + ^a +
+
+
+
+
+ +

+ + + a / b + + +

+
+
+
+ Full Usage: + a / b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Divides the scalar a by each element of the tensor b. The resulting tensor is returned. +

+
+
+
+ + a + + : + scalar +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a / b + + +

+
+
+
+ Full Usage: + a / b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Divides each element of the tensor a by the scalar b. The resulting tensor is returned. +

+
+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + a / b + + +

+
+
+
+ Full Usage: + a / b +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Divides each element of the tensor a by the corresponding element of the tensor b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + ~-a + + +

+
+
+
+ Full Usage: + ~-a +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a new tensor with the negative of the elements of a. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Abs(a) + + +

+
+
+
+ Full Usage: + Tensor.Abs(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function abs. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Acos(t) + + +

+
+
+
+ Full Usage: + Tensor.Acos(t) +
+
+ Parameters: +
    + + + t + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function acos. +

+
+
+
+ + t + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Asin(t) + + +

+
+
+
+ Full Usage: + Tensor.Asin(t) +
+
+ Parameters: +
    + + + t + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function asin. +

+
+
+
+ + t + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Atan(t) + + +

+
+
+
+ Full Usage: + Tensor.Atan(t) +
+
+ Parameters: +
    + + + t + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function atan. +

+
+
+
+ + t + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Ceiling(a) + + +

+
+
+
+ Full Usage: + Tensor.Ceiling(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function ceil. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Cos(a) + + +

+
+
+
+ Full Usage: + Tensor.Cos(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function cos. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Cosh(t) + + +

+
+
+
+ Full Usage: + Tensor.Cosh(t) +
+
+ Parameters: +
    + + + t + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function cosh. +

+
+
+
+ + t + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Exp(a) + + +

+
+
+
+ Full Usage: + Tensor.Exp(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function exp. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Floor(a) + + +

+
+
+
+ Full Usage: + Tensor.Floor(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function floor. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Log(a) + + +

+
+
+
+ Full Usage: + Tensor.Log(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function log. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Log10(a) + + +

+
+
+
+ Full Usage: + Tensor.Log10(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function log10. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.One + + +

+
+
+
+ Full Usage: + Tensor.One +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Get the scalar one tensor for the current configuration + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Op(ext) + + +

+
+
+
+ Full Usage: + Tensor.Op(ext) +
+
+ Parameters: +
    + + + ext + + : + BinaryOp + - + The definition of the new op. + +
    +
+
+ + Returns: + Tensor * Tensor -> Tensor + + The new op. +
+
+
+
+
+
+ + + + + + +

+ Allows the definition of a new binary tensor op. +

+
+
+
+ + ext + + : + BinaryOp +
+
+

+ The definition of the new op. +

+
+
+
+
+ + Returns: + + Tensor * Tensor -> Tensor +
+
+

+ The new op. +

+
+
+
+
+ +

+ + + Tensor.Op(ext) + + +

+
+
+
+ Full Usage: + Tensor.Op(ext) +
+
+ Parameters: +
    + + + ext + + : + UnaryOp + - + The definition of the new op. + +
    +
+
+ + Returns: + Tensor -> Tensor + + The new op. +
+
+
+
+
+
+ + + + + + +

+ Allows the definition of a new unary tensor op. +

+
+
+
+ + ext + + : + UnaryOp +
+
+

+ The definition of the new op. +

+
+
+
+
+ + Returns: + + Tensor -> Tensor +
+
+

+ The new op. +

+
+
+
+
+ +

+ + + Tensor.Pow(a, b) + + +

+
+
+
+ Full Usage: + Tensor.Pow(a, b) +
+
+ Parameters: +
    + + + a + + : + int + +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises the scalar a to the power of each element of the tensor b. The resulting tensor is returned. +

+
+
+
+ + a + + : + int +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Pow(a, b) + + +

+
+
+
+ Full Usage: + Tensor.Pow(a, b) +
+
+ Parameters: +
    + + + a + + : + float + +
    + + + b + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises the scalar a to the power of each element of the tensor b. The resulting tensor is returned. +

+
+
+
+ + a + + : + float +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Pow(a, b) + + +

+
+
+
+ Full Usage: + Tensor.Pow(a, b) +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises the scalar a to the power of each element of the tensor b. The resulting tensor is returned. +

+
+
+
+ + a + + : + scalar +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Pow(a, b) + + +

+
+
+
+ Full Usage: + Tensor.Pow(a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    + + + b + + : + int + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises each element of the tensor a to the power of the scalar b. The resulting tensor is returned. +

+
+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + int +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Pow(a, b) + + +

+
+
+
+ Full Usage: + Tensor.Pow(a, b) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    + + + b + + : + float + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises each element of the tensor a to the power of the scalar b. The resulting tensor is returned. +

+
+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + float +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Pow(a, b) + + +

+
+
+
+ Full Usage: + Tensor.Pow(a, b) +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises each element of the tensor a to the power of the scalar b. The resulting tensor is returned. +

+
+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + scalar +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Pow(a, b) + + +

+
+
+
+ Full Usage: + Tensor.Pow(a, b) +
+
+ Parameters: + +
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Raises each element of the tensor a to the power of the corresponding element of the tensor b. The resulting tensor is returned. +

+
+

+ The shapes of the two tensors must be broadcastable. +

+
+
+ + a + + : + Tensor +
+
+
+ + b + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Round(a) + + +

+
+
+
+ Full Usage: + Tensor.Round(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function round. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Sin(a) + + +

+
+
+
+ Full Usage: + Tensor.Sin(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function sin. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Sinh(a) + + +

+
+
+
+ Full Usage: + Tensor.Sinh(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function sinh. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Sqrt(a) + + +

+
+
+
+ Full Usage: + Tensor.Sqrt(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function sqrt. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Tan(a) + + +

+
+
+
+ Full Usage: + Tensor.Tan(a) +
+
+ Parameters: +
    + + + a + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function tan. +

+
+
+
+ + a + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Tanh(t) + + +

+
+
+
+ Full Usage: + Tensor.Tanh(t) +
+
+ Parameters: +
    + + + t + + : + Tensor + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ A method to enable the use of the F# function tanh. +

+
+
+
+ + t + + : + Tensor +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.Zero + + +

+
+
+
+ Full Usage: + Tensor.Zero +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Get the scalar zero tensor for the current configuration + +

+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.cat (tensors, ?dim) + + +

+
+
+
+ Full Usage: + Tensor.cat (tensors, ?dim) +
+
+ Parameters: +
    + + + tensors + + : + seq<Tensor> + - + The tensors to concatenate. + +
    + + + ?dim + + : + int + - + The dimension over which the tensors are concatenated, defaults to 0. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Concatenates the given sequence of tensors in the given dimension. +

+
+

+ All tensors must either have the same shape (except in the concatenating dimension) or be empty. +

+
+
+ + tensors + + : + seq<Tensor> +
+
+

+ The tensors to concatenate. +

+
+
+ + ?dim + + : + int +
+
+

+ The dimension over which the tensors are concatenated, defaults to 0. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.create (value, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + Tensor.create (value, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + value + + : + obj + - + The .NET object used to form the initial values for the tensor. + +
    + + + ?device + + : + Device + - + The desired device of returned tensor. Default: if None, uses Device.Default. + +
    + + + ?dtype + + : + Dtype + - + The desired element type of returned tensor. Default: if None, uses Dtype.Default. + +
    + + + ?backend + + : + Backend + - + The desired backend of returned tensor. Default: if None, uses Backend.Default. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Creates a new tensor from the given data, using the given element type and configuration. + +

+
+

+ The fastest creation technique is a one dimensional array matching the desired dtype. Then use 'view' to reshape. +

+
+
+ + value + + : + obj +
+
+

+ The .NET object used to form the initial values for the tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The desired device of returned tensor. Default: if None, uses Device.Default. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The desired element type of returned tensor. Default: if None, uses Dtype.Default. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The desired backend of returned tensor. Default: if None, uses Backend.Default. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.eye (rows, ?cols, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + Tensor.eye (rows, ?cols, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + rows + + : + int + +
    + + + ?cols + + : + int + +
    + + + ?device + + : + Device + +
    + + + ?dtype + + : + Dtype + +
    + + + ?backend + + : + Backend + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. +

+
+
+
+ + rows + + : + int +
+
+
+ + ?cols + + : + int +
+
+
+ + ?device + + : + Device +
+
+
+ + ?dtype + + : + Dtype +
+
+
+ + ?backend + + : + Backend +
+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.load (fileName, ?device, ?dtype, ?backend) + + +

+
+
+
+ Full Usage: + Tensor.load (fileName, ?device, ?dtype, ?backend) +
+
+ Parameters: +
    + + + fileName + + : + string + - + The file from which to load the tensor. + +
    + + + ?device + + : + Device + - + The device of the resulting tensor. Defaults to the current default device. + +
    + + + ?dtype + + : + Dtype + - + The element type of the resulting tensor. Defaults to the element type of the saved tensor. + +
    + + + ?backend + + : + Backend + - + The backend of the resulting tensor. Defaults to the current default backend. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Loads the tensor from the given file using the given element type and configuration. +

+
+

+ + The backend at the time of saving the tensor must be available when the tensor is reloaded. + The tensor is first loaded into that backend and then moved. As a result, intermediate tensors may be created + in the process of reloading. + +

+
+
+ + fileName + + : + string +
+
+

+ The file from which to load the tensor. +

+
+
+ + ?device + + : + Device +
+
+

+ The device of the resulting tensor. Defaults to the current default device. +

+
+
+ + ?dtype + + : + Dtype +
+
+

+ The element type of the resulting tensor. Defaults to the element type of the saved tensor. +

+
+
+ + ?backend + + : + Backend +
+
+

+ The backend of the resulting tensor. Defaults to the current default backend. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + Tensor.ofRawTensor rawTensor + + +

+
+
+
+ Full Usage: + Tensor.ofRawTensor rawTensor +
+
+ Parameters: +
    + + + rawTensor + + : + RawTensor + - + The given raw tensor. + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ + Creates a new tensor from the raw tensor. + +

+
+
+
+ + rawTensor + + : + RawTensor +
+
+

+ The given raw tensor. +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+ +

+ + + op_Explicittensor + + +

+
+
+
+ Full Usage: + op_Explicittensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a boolean value + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + op_Explicittensor + + +

+
+
+
+ Full Usage: + op_Explicittensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + int64 + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to an int64 value + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + int64 +
+
+
+
+
+ +

+ + + op_Explicittensor + + +

+
+
+
+ Full Usage: + op_Explicittensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + int32 + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to an int32 value + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + int32 +
+
+
+
+
+ +

+ + + op_Explicittensor + + +

+
+
+
+ Full Usage: + op_Explicittensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + int16 + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to an int16 value + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + int16 +
+
+
+
+
+ +

+ + + op_Explicittensor + + +

+
+
+
+ Full Usage: + op_Explicittensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + int8 + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to an int8 value + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + int8 +
+
+
+
+
+ +

+ + + op_Explicittensor + + +

+
+
+
+ Full Usage: + op_Explicittensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + byte + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a byte value + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + byte +
+
+
+
+
+ +

+ + + op_Explicittensor + + +

+
+
+
+ Full Usage: + op_Explicittensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + double + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a double (float64) value + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + double +
+
+
+
+
+ +

+ + + op_Explicittensor + + +

+
+
+
+ Full Usage: + op_Explicittensor +
+
+ Parameters: +
    + + + tensor + + : + Tensor + +
    +
+
+ + Returns: + single + +
+
+
+
+
+
+ + + + + + +

+ + Convert a scalar tensor to a float32 value + +

+
+
+
+ + tensor + + : + Tensor +
+
+
+
+
+ + Returns: + + single +
+
+
+
+
+ +

+ + + Tensor.stack (tensors, ?dim) + + +

+
+
+
+ Full Usage: + Tensor.stack (tensors, ?dim) +
+
+ Parameters: +
    + + + tensors + + : + seq<Tensor> + - + sequence of tensors to concatenate + +
    + + + ?dim + + : + int + - + dimension to insert. Has to be between 0 and the number of dimensions of concatenated tensors (inclusive) + +
    +
+
+ + Returns: + Tensor + +
+
+
+
+
+
+ + + + + + +

+ Concatenates sequence of tensors along a new dimension. +

+
+

+ All tensors need to be of the same shape. +

+
+
+ + tensors + + : + seq<Tensor> +
+
+

+ sequence of tensors to concatenate +

+
+
+ + ?dim + + : + int +
+
+

+ dimension to insert. Has to be between 0 and the number of dimensions of concatenated tensors (inclusive) +

+
+
+
+
+ + Returns: + + Tensor +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-tensorop.html b/reference/furnace-tensorop.html new file mode 100644 index 00000000..c8b2b929 --- /dev/null +++ b/reference/furnace-tensorop.html @@ -0,0 +1,6998 @@ + + + + + TensorOp (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ TensorOp Type +

+ +
+
+

+ +

+
+
+
+
+
+
+

+ Union cases +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Union case + + Description +
+
+ +

+ + + AbsT Tensor + + +

+
+
+
+ Full Usage: + AbsT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + AcosT Tensor + + +

+
+
+
+ Full Usage: + AcosT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + AddTConstTSlice(int[], Tensor) + + +

+
+
+
+ Full Usage: + AddTConstTSlice(int[], Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + int[] + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + int[] +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + AddTT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + AddTT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + AddTT0(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + AddTT0(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + AddTT0Const Tensor + + +

+
+
+
+ Full Usage: + AddTT0Const Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + AddTTConst Tensor + + +

+
+
+
+ Full Usage: + AddTTConst Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + AddTTConstSlice Tensor + + +

+
+
+
+ Full Usage: + AddTTConstSlice Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + AddTTSlice(Tensor, int[], Tensor) + + +

+
+
+
+ Full Usage: + AddTTSlice(Tensor, int[], Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int[] + +
    + + + Item3 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int[] +
+
+
+ + Item3 + + : + Tensor +
+
+
+
+
+ +

+ + + AsinT Tensor + + +

+
+
+
+ Full Usage: + AsinT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + AtanT Tensor + + +

+
+
+
+ Full Usage: + AtanT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + CatTs(Tensor[], dim) + + +

+
+
+
+ Full Usage: + CatTs(Tensor[], dim) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor[] + +
    + + + dim + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor[] +
+
+
+ + dim + + : + int +
+
+
+
+
+ +

+ + + CeilT Tensor + + +

+
+
+
+ Full Usage: + CeilT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + ClampT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + ClampT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + Conv1DTConstT(Tensor, Tensor, int, int) + + +

+
+
+
+ Full Usage: + Conv1DTConstT(Tensor, Tensor, int, int) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int + +
    + + + Item4 + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int +
+
+
+ + Item4 + + : + int +
+
+
+
+
+ +

+ + + Conv1DTT(Tensor, Tensor, int, int) + + +

+
+
+
+ Full Usage: + Conv1DTT(Tensor, Tensor, int, int) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int + +
    + + + Item4 + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int +
+
+
+ + Item4 + + : + int +
+
+
+
+
+ +

+ + + Conv1DTTConst(Tensor, Tensor, int, int) + + +

+
+
+
+ Full Usage: + Conv1DTTConst(Tensor, Tensor, int, int) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int + +
    + + + Item4 + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int +
+
+
+ + Item4 + + : + int +
+
+
+
+
+ +

+ + + Conv2DTConstT(Tensor, Tensor, int[], int[]) + + +

+
+
+
+ Full Usage: + Conv2DTConstT(Tensor, Tensor, int[], int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int[] + +
    + + + Item4 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int[] +
+
+
+ + Item4 + + : + int[] +
+
+
+
+
+ +

+ + + Conv2DTT(Tensor, Tensor, int[], int[]) + + +

+
+
+
+ Full Usage: + Conv2DTT(Tensor, Tensor, int[], int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int[] + +
    + + + Item4 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int[] +
+
+
+ + Item4 + + : + int[] +
+
+
+
+
+ +

+ + + Conv2DTTConst(Tensor, Tensor, int[], int[]) + + +

+
+
+
+ Full Usage: + Conv2DTTConst(Tensor, Tensor, int[], int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int[] + +
    + + + Item4 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int[] +
+
+
+ + Item4 + + : + int[] +
+
+
+
+
+ +

+ + + Conv3DTConstT(Tensor, Tensor, int[], int[]) + + +

+
+
+
+ Full Usage: + Conv3DTConstT(Tensor, Tensor, int[], int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int[] + +
    + + + Item4 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int[] +
+
+
+ + Item4 + + : + int[] +
+
+
+
+
+ +

+ + + Conv3DTT(Tensor, Tensor, int[], int[]) + + +

+
+
+
+ Full Usage: + Conv3DTT(Tensor, Tensor, int[], int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int[] + +
    + + + Item4 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int[] +
+
+
+ + Item4 + + : + int[] +
+
+
+
+
+ +

+ + + Conv3DTTConst(Tensor, Tensor, int[], int[]) + + +

+
+
+
+ Full Usage: + Conv3DTTConst(Tensor, Tensor, int[], int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int[] + +
    + + + Item4 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int[] +
+
+
+ + Item4 + + : + int[] +
+
+
+
+
+ +

+ + + CosT Tensor + + +

+
+
+
+ Full Usage: + CosT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + CoshT Tensor + + +

+
+
+
+ Full Usage: + CoshT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + DilateT(Tensor, int[]) + + +

+
+
+
+ Full Usage: + DilateT(Tensor, int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int[] +
+
+
+
+
+ +

+ + + DivT0ConstT(scalar, Tensor) + + +

+
+
+
+ Full Usage: + DivT0ConstT(scalar, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + scalar + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + scalar +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + DivT0T(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + DivT0T(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + DivTConstT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + DivTConstT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + DivTT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + DivTT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + DivTT0(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + DivTT0(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + DivTT0Const(Tensor, scalar) + + +

+
+
+
+ Full Usage: + DivTT0Const(Tensor, scalar) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + scalar + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + scalar +
+
+
+
+
+ +

+ + + DivTTConst(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + DivTTConst(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + ExpT Tensor + + +

+
+
+
+ Full Usage: + ExpT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + ExpandT Tensor + + +

+
+
+
+ Full Usage: + ExpandT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + FlipT(Tensor, int[]) + + +

+
+
+
+ Full Usage: + FlipT(Tensor, int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int[] +
+
+
+
+
+ +

+ + + FloorT Tensor + + +

+
+
+
+ Full Usage: + FloorT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + GatherT(Tensor, int, Tensor) + + +

+
+
+
+ Full Usage: + GatherT(Tensor, int, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int + +
    + + + Item3 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int +
+
+
+ + Item3 + + : + Tensor +
+
+
+
+
+ +

+ + + Log10T Tensor + + +

+
+
+
+ Full Usage: + Log10T Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + LogT Tensor + + +

+
+
+
+ Full Usage: + LogT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + MatMulTConstT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MatMulTConstT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MatMulTT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MatMulTT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MatMulTTConst(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MatMulTTConst(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MaxPool1DT(Tensor, Tensor, int) + + +

+
+
+
+ Full Usage: + MaxPool1DT(Tensor, Tensor, int) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int +
+
+
+
+
+ +

+ + + MaxPool2DT(Tensor, Tensor, int[]) + + +

+
+
+
+ Full Usage: + MaxPool2DT(Tensor, Tensor, int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int[] +
+
+
+
+
+ +

+ + + MaxPool3DT(Tensor, Tensor, int[]) + + +

+
+
+
+ Full Usage: + MaxPool3DT(Tensor, Tensor, int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    + + + Item3 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + int[] +
+
+
+
+
+ +

+ + + MaxUnpool1DT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MaxUnpool1DT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MaxUnpool2DT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MaxUnpool2DT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MaxUnpool3DT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MaxUnpool3DT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MulTConstT0(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MulTConstT0(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MulTT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MulTT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MulTT0(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MulTT0(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + MulTT0Const(Tensor, scalar) + + +

+
+
+
+ Full Usage: + MulTT0Const(Tensor, scalar) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + scalar + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + scalar +
+
+
+
+
+ +

+ + + MulTTConst(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + MulTTConst(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + NegT Tensor + + +

+
+
+
+ Full Usage: + NegT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + NewT + + +

+
+
+
+ Full Usage: + NewT +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ +

+ + + OpBinaryCT(Tensor, Tensor, Tensor * Tensor * Tensor * Tensor -> Tensor, string) + + +

+
+
+
+ Full Usage: + OpBinaryCT(Tensor, Tensor, Tensor * Tensor * Tensor * Tensor -> Tensor, string) +
+
+ Parameters: + +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + Tensor * Tensor * Tensor * Tensor -> Tensor +
+
+
+ + Item4 + + : + string +
+
+
+
+
+ +

+ + + OpBinaryTC(Tensor, Tensor, Tensor * Tensor * Tensor * Tensor -> Tensor, string) + + +

+
+
+
+ Full Usage: + OpBinaryTC(Tensor, Tensor, Tensor * Tensor * Tensor * Tensor -> Tensor, string) +
+
+ Parameters: + +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + Tensor * Tensor * Tensor * Tensor -> Tensor +
+
+
+ + Item4 + + : + string +
+
+
+
+
+ +

+ + + OpBinaryTT(Tensor, Tensor, Tensor * Tensor * Tensor * Tensor -> Tensor * Tensor, string) + + +

+
+
+
+ Full Usage: + OpBinaryTT(Tensor, Tensor, Tensor * Tensor * Tensor * Tensor -> Tensor * Tensor, string) +
+
+ Parameters: + +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+ + Item3 + + : + Tensor * Tensor * Tensor * Tensor -> Tensor * Tensor +
+
+
+ + Item4 + + : + string +
+
+
+
+
+ +

+ + + OpUnaryT(Tensor, Tensor * Tensor * Tensor -> Tensor, string) + + +

+
+
+
+ Full Usage: + OpUnaryT(Tensor, Tensor * Tensor * Tensor -> Tensor, string) +
+
+ Parameters: + +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor * Tensor * Tensor -> Tensor +
+
+
+ + Item3 + + : + string +
+
+
+
+
+ +

+ + + PermuteT(Tensor, inversePermutation) + + +

+
+
+
+ Full Usage: + PermuteT(Tensor, inversePermutation) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + inversePermutation + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + inversePermutation + + : + int[] +
+
+
+
+
+ +

+ + + PowT0ConstT(scalar, Tensor) + + +

+
+
+
+ Full Usage: + PowT0ConstT(scalar, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + scalar + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + scalar +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + PowTConstT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + PowTConstT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + PowTT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + PowTT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + PowTT0Const(Tensor, scalar) + + +

+
+
+
+ Full Usage: + PowTT0Const(Tensor, scalar) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + scalar + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + scalar +
+
+
+
+
+ +

+ + + PowTTConst(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + PowTTConst(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + ReluT Tensor + + +

+
+
+
+ Full Usage: + ReluT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + RoundT Tensor + + +

+
+
+
+ Full Usage: + RoundT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + ScatterT(Tensor, int, Tensor) + + +

+
+
+
+ Full Usage: + ScatterT(Tensor, int, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int + +
    + + + Item3 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int +
+
+
+ + Item3 + + : + Tensor +
+
+
+
+
+ +

+ + + SigmoidT Tensor + + +

+
+
+
+ Full Usage: + SigmoidT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SignT Tensor + + +

+
+
+
+ Full Usage: + SignT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SinT Tensor + + +

+
+
+
+ Full Usage: + SinT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SinhT Tensor + + +

+
+
+
+ Full Usage: + SinhT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SliceT(Tensor, int[,]) + + +

+
+
+
+ Full Usage: + SliceT(Tensor, int[,]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int[,] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int[,] +
+
+
+
+
+ +

+ + + SoftplusT Tensor + + +

+
+
+
+ Full Usage: + SoftplusT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SplitT(Tensor, int[], dim, i) + + +

+
+
+
+ Full Usage: + SplitT(Tensor, int[], dim, i) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int[] + +
    + + + dim + + : + int + +
    + + + i + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int[] +
+
+
+ + dim + + : + int +
+
+
+ + i + + : + int +
+
+
+
+
+ +

+ + + SqrtT Tensor + + +

+
+
+
+ Full Usage: + SqrtT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SqueezeT Tensor + + +

+
+
+
+ Full Usage: + SqueezeT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + StackTs(Tensor[], dim) + + +

+
+
+
+ Full Usage: + StackTs(Tensor[], dim) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor[] + +
    + + + dim + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor[] +
+
+
+ + dim + + : + int +
+
+
+
+
+ +

+ + + SubT0ConstT Tensor + + +

+
+
+
+ Full Usage: + SubT0ConstT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SubTConstT Tensor + + +

+
+
+
+ Full Usage: + SubTConstT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SubTT(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + SubTT(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + SubTT0(Tensor, Tensor) + + +

+
+
+
+ Full Usage: + SubTT0(Tensor, Tensor) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + Tensor +
+
+
+
+
+ +

+ + + SubTT0Const Tensor + + +

+
+
+
+ Full Usage: + SubTT0Const Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SubTTConst Tensor + + +

+
+
+
+ Full Usage: + SubTTConst Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SumT Tensor + + +

+
+
+
+ Full Usage: + SumT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + SumTDim(Tensor, int) + + +

+
+
+
+ Full Usage: + SumTDim(Tensor, int) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int +
+
+
+
+
+ +

+ + + TanT Tensor + + +

+
+
+
+ Full Usage: + TanT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + TanhT Tensor + + +

+
+
+
+ Full Usage: + TanhT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + TransposeT(Tensor, int, int) + + +

+
+
+
+ Full Usage: + TransposeT(Tensor, int, int) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int + +
    + + + Item3 + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int +
+
+
+ + Item3 + + : + int +
+
+
+
+
+ +

+ + + TransposeT2 Tensor + + +

+
+
+
+ Full Usage: + TransposeT2 Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + UndilateT(Tensor, int[]) + + +

+
+
+
+ Full Usage: + UndilateT(Tensor, int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int[] +
+
+
+
+
+ +

+ + + UnsqueezeT Tensor + + +

+
+
+
+ Full Usage: + UnsqueezeT Tensor +
+
+ Parameters: +
    + + + Item + + : + Tensor + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item + + : + Tensor +
+
+
+
+
+ +

+ + + UnstackT(Tensor, dim, i) + + +

+
+
+
+ Full Usage: + UnstackT(Tensor, dim, i) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + dim + + : + int + +
    + + + i + + : + int + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + dim + + : + int +
+
+
+ + i + + : + int +
+
+
+
+
+ +

+ + + ViewT(Tensor, int[]) + + +

+
+
+
+ Full Usage: + ViewT(Tensor, int[]) +
+
+ Parameters: +
    + + + Item1 + + : + Tensor + +
    + + + Item2 + + : + int[] + +
    +
+
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Item1 + + : + Tensor +
+
+
+ + Item2 + + : + int[] +
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-unaryop.html b/reference/furnace-unaryop.html new file mode 100644 index 00000000..67eb3b56 --- /dev/null +++ b/reference/furnace-unaryop.html @@ -0,0 +1,712 @@ + + + + + UnaryOp (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ UnaryOp Type +

+ +
+
+

+ Defines a new op implementing a unary function and its derivatives. Instances of this class are used with the Tensor.Op method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. +

+
+

+

This type represents the most generic definition of a new op representing a unary function, allowing the specification of: (1) the RawTensor operation, (2) the derivative propagation rule for the forward differentiation mode and (3) the derivative propagation rule for the reverse differentiation mode.

In general, if you are implementing a simple elementwise op, you should prefer using the UnaryOpElementwise type, which is much simpler to use.

+

+
+ Example +
+

+

+ { new UnaryOp("transpose") with
+     member _.fRaw(a) = a.TransposeT2()
+     member _.ad_dfda(a,ad,f) = ad.transpose()
+     member _.fd_dfda(a,f,fd) = fd.transpose()
+ }
+

+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + UnaryOp(name) + + +

+
+
+
+ Full Usage: + UnaryOp(name) +
+
+ Parameters: +
    + + + name + + : + string + +
    +
+
+ + Returns: + UnaryOp + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + name + + : + string +
+
+
+
+
+ + Returns: + + UnaryOp +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + this.ad_dfda (a, ad, f) + + +

+
+
+
+ Full Usage: + this.ad_dfda (a, ad, f) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The argument \( a \). + +
    + + + ad + + : + Tensor + - + The argument's derivative \( \frac{\partial a}{\partial x} \). + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial f(a)}{\partial x} = \frac{\partial a}{\partial x} \frac{\partial f(a)}{\partial a} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative propagation rule for forward differentiation mode. This represents the derivative of \( f(a) \) with respect a value \( x \) earlier in the computation graph than the function's argument \( a \). In other words, it computes \( \frac{\partial f(a)}{\partial x} = \frac{\partial a}{\partial x} \frac{\partial f(a)}{\partial a} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The argument \( a \). +

+
+
+ + ad + + : + Tensor +
+
+

+ The argument's derivative \( \frac{\partial a}{\partial x} \). +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial f(a)}{\partial x} = \frac{\partial a}{\partial x} \frac{\partial f(a)}{\partial a} \). +

+
+
+
+
+ +

+ + + this.fRaw a + + +

+
+
+
+ Full Usage: + this.fRaw a +
+
+ Parameters: +
    + + + a + + : + RawTensor + - + The argument \( a \). + +
    +
+
+ + Returns: + RawTensor + + The function's value \( f(a) \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ RawTensor operation \( f(a) \) performing the op. +

+
+
+
+ + a + + : + RawTensor +
+
+

+ The argument \( a \). +

+
+
+
+
+ + Returns: + + RawTensor +
+
+

+ The function's value \( f(a) \). +

+
+
+
+
+ +

+ + + this.fd_dfda (a, f, fd) + + +

+
+
+
+ Full Usage: + this.fd_dfda (a, f, fd) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The argument \( a \). + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    + + + fd + + : + Tensor + - + The derivative with respect to the function's output \( \frac{\partial y}{\partial f(a)} \). + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial y}{\partial a} = \frac{\partial y}{\partial f(a)} \frac{\partial f(a)}{\partial a} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative propagation rule for reverse differentiation mode. This represents the derivative of a value \( y \), which comes later in the computation graph than the function's value \( f(a) \), with respect to the function's argument \( a \). In other words, it computes \( \frac{\partial y}{\partial a} = \frac{\partial y}{\partial f(a)} \frac{\partial f(a)}{\partial a} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The argument \( a \). +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+ + fd + + : + Tensor +
+
+

+ The derivative with respect to the function's output \( \frac{\partial y}{\partial f(a)} \). +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial y}{\partial a} = \frac{\partial y}{\partial f(a)} \frac{\partial f(a)}{\partial a} \). +

+
+
+
+
+ +

+ + + this.name + + +

+
+
+
+ Full Usage: + this.name +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Name of the op. + +

+
+
+
+ + Returns: + + string +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-unaryopelementwise.html b/reference/furnace-unaryopelementwise.html new file mode 100644 index 00000000..0c04ec0c --- /dev/null +++ b/reference/furnace-unaryopelementwise.html @@ -0,0 +1,413 @@ + + + + + UnaryOpElementwise (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ UnaryOpElementwise Type +

+ +
+
+

+ Defines a new op implementing an elementwise unary function and its derivatives. Instances of this class are used with the Tensor.Op method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. +

+
+

+

This type is specialized to elementwise ops. It requires the user to specify only (1) the RawTensor operation and (2) the derivative of the function with respect to its argument. The corresponding derivative propagation rules for the forward and reverse differentiation modes are automatically generated.

If you are implementing a complex op that is not elementwise, you can use the generic type UnaryOp, which allows you to define the full derivative propagation rules.

+

+
+ Example +
+

+

+ { new UnaryOpElementwise("cos") with
+     member _.fRaw(a) = a.CosT()
+     member _.dfda(a,f) = -a.sin()
+ }
+
+ { new UnaryOpElementwise("exp") with
+     member _.fRaw(a) = a.ExpT()
+     member _.dfda(a,f) = f
+ }
+
+ { new UnaryOpElementwise("log") with
+     member _.fRaw(a) = a.LogT()
+     member _.dfda(a,f) = 1/a
+ }
+

+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + UnaryOpElementwise(name) + + +

+
+
+
+ Full Usage: + UnaryOpElementwise(name) +
+
+ Parameters: +
    + + + name + + : + string + +
    +
+
+ + Returns: + UnaryOpElementwise + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + name + + : + string +
+
+
+
+
+ + Returns: + + UnaryOpElementwise +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.dfda (a, f) + + +

+
+
+
+ Full Usage: + this.dfda (a, f) +
+
+ Parameters: +
    + + + a + + : + Tensor + - + The argument \( a \) + +
    + + + f + + : + Tensor + - + The function's pre-computed primal evaluation result \( f(a) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. + +
    +
+
+ + Returns: + Tensor + + The tensor corresponding to \( \frac{\partial f(a)}{\partial a} \). +
+ Modifiers: + abstract +
+
+
+
+
+
+ + + + + + +

+ Derivative of the function with respect to its argument, \( \frac{\partial f(a)}{\partial a} \). +

+
+
+
+ + a + + : + Tensor +
+
+

+ The argument \( a \) +

+
+
+ + f + + : + Tensor +
+
+

+ The function's pre-computed primal evaluation result \( f(a) \), which can be one of the terms involved in the derivative computation (e.g., the derivative of the exponential function) and be used without the need to recompute it. +

+
+
+
+
+ + Returns: + + Tensor +
+
+

+ The tensor corresponding to \( \frac{\partial f(a)}{\partial a} \). +

+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-array.html b/reference/furnace-util-array.html new file mode 100644 index 00000000..53ad6f90 --- /dev/null +++ b/reference/furnace-util-array.html @@ -0,0 +1,1024 @@ + + + + + Array (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Array Module +

+ +
+
+

+ + Contains extensions to the F# Array module. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + allClose relativeTolerance absoluteTolerance array1 array2 + + +

+
+
+
+ Full Usage: + allClose relativeTolerance absoluteTolerance array1 array2 +
+
+ Parameters: +
    + + + relativeTolerance + + : + ^T + +
    + + + absoluteTolerance + + : + ^T + +
    + + + array1 + + : + ^T[] + +
    + + + array2 + + : + ^T[] + +
    +
+
+ + Returns: + bool + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ + Determines if all values of the first array lie within the given tolerances of the second array. + +

+
+
+
+ + relativeTolerance + + : + ^T +
+
+
+ + absoluteTolerance + + : + ^T +
+
+
+ + array1 + + : + ^T[] +
+
+
+ + array2 + + : + ^T[] +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + cumulativeSum a + + +

+
+
+
+ Full Usage: + cumulativeSum a +
+
+ Parameters: +
    + + + a + + : + ^d[] + +
    +
+
+ + Returns: + ^e[] + +
+ Modifiers: + inline +
+ Type parameters: + ^d, ^e +
+
+
+
+
+ + + + + + +

+ + Gets the cumulative sum of the input array. + +

+
+
+
+ + a + + : + ^d[] +
+
+
+
+
+ + Returns: + + ^e[] +
+
+
+
+
+ +

+ + + foralli f arr + + +

+
+
+
+ Full Usage: + foralli f arr +
+
+ Parameters: +
    + + + f + + : + int -> 'T -> bool + +
    + + + arr + + : + 'T[] + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + f + + : + int -> 'T -> bool +
+
+
+ + arr + + : + 'T[] +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + getUniqueCounts sorted values + + +

+
+
+
+ Full Usage: + getUniqueCounts sorted values +
+
+ Parameters: +
    + + + sorted + + : + bool + +
    + + + values + + : + 'T[] + +
    +
+
+ + Returns: + 'T[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ + Gets the unique counts of the input array. + +

+
+
+
+ + sorted + + : + bool +
+
+
+ + values + + : + 'T[] +
+
+
+
+
+ + Returns: + + 'T[] * int[] +
+
+
+
+
+ +

+ + + initFlat2D i j f + + +

+
+
+
+ Full Usage: + initFlat2D i j f +
+
+ Parameters: +
    + + + i + + : + int + +
    + + + j + + : + int + +
    + + + f + + : + int -> int -> 'a + +
    +
+
+ + Returns: + 'a[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + i + + : + int +
+
+
+ + j + + : + int +
+
+
+ + f + + : + int -> int -> 'a +
+
+
+
+
+ + Returns: + + 'a[] +
+
+
+
+
+ +

+ + + initFlat3D i j k f + + +

+
+
+
+ Full Usage: + initFlat3D i j k f +
+
+ Parameters: +
    + + + i + + : + int + +
    + + + j + + : + int + +
    + + + k + + : + int + +
    + + + f + + : + int -> int -> int -> 'a + +
    +
+
+ + Returns: + 'a[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + i + + : + int +
+
+
+ + j + + : + int +
+
+
+ + k + + : + int +
+
+
+ + f + + : + int -> int -> int -> 'a +
+
+
+
+
+ + Returns: + + 'a[] +
+
+
+
+
+ +

+ + + insertManyAt index values source + + +

+
+
+
+ Full Usage: + insertManyAt index values source +
+
+ Parameters: +
    + + + index + + : + int + +
    + + + values + + : + seq<'T> + +
    + + + source + + : + 'T[] + +
    +
+
+ + Returns: + 'T[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + index + + : + int +
+
+
+ + values + + : + seq<'T> +
+
+
+ + source + + : + 'T[] +
+
+
+
+
+ + Returns: + + 'T[] +
+
+
+
+
+ +

+ + + removeAt index source + + +

+
+
+
+ Full Usage: + removeAt index source +
+
+ Parameters: +
    + + + index + + : + int + +
    + + + source + + : + 'T[] + +
    +
+
+ + Returns: + 'T[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + index + + : + int +
+
+
+ + source + + : + 'T[] +
+
+
+
+
+ + Returns: + + 'T[] +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-array4d.html b/reference/furnace-util-array4d.html new file mode 100644 index 00000000..a406c867 --- /dev/null +++ b/reference/furnace-util-array4d.html @@ -0,0 +1,272 @@ + + + + + Array4D (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Array4D Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + map mapping array + + +

+
+
+
+ Full Usage: + map mapping array +
+
+ Parameters: +
    + + + mapping + + : + 'a -> 'b + +
    + + + array + + : + 'a[,,,] + +
    +
+
+ + Returns: + 'b[,,,] + +
+
+
+
+
+
+ + + + + + +

+ + Builds a new array whose elements are the results of applying the given function to each of the elements of the array. + +

+
+
+
+ + mapping + + : + 'a -> 'b +
+
+
+ + array + + : + 'a[,,,] +
+
+
+
+
+ + Returns: + + 'b[,,,] +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-array5d.html b/reference/furnace-util-array5d.html new file mode 100644 index 00000000..5af451e2 --- /dev/null +++ b/reference/furnace-util-array5d.html @@ -0,0 +1,1294 @@ + + + + + Array5D (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Array5D Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + create length1 length2 length3 length4 length5 initial + + +

+
+
+
+ Full Usage: + create length1 length2 length3 length4 length5 initial +
+
+ Parameters: +
    + + + length1 + + : + int + +
    + + + length2 + + : + int + +
    + + + length3 + + : + int + +
    + + + length4 + + : + int + +
    + + + length5 + + : + int + +
    + + + initial + + : + 'T + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + length1 + + : + int +
+
+
+ + length2 + + : + int +
+
+
+ + length3 + + : + int +
+
+
+ + length4 + + : + int +
+
+
+ + length5 + + : + int +
+
+
+ + initial + + : + 'T +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + get array index1 index2 index3 index4 index5 + + +

+
+
+
+ Full Usage: + get array index1 index2 index3 index4 index5 +
+
+ Parameters: +
    + + + array + + : + Array + +
    + + + index1 + + : + int + +
    + + + index2 + + : + int + +
    + + + index3 + + : + int + +
    + + + index4 + + : + int + +
    + + + index5 + + : + int + +
    +
+
+ + Returns: + obj + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+ + index1 + + : + int +
+
+
+ + index2 + + : + int +
+
+
+ + index3 + + : + int +
+
+
+ + index4 + + : + int +
+
+
+ + index5 + + : + int +
+
+
+
+
+ + Returns: + + obj +
+
+
+
+
+ +

+ + + init length1 length2 length3 length4 length5 initializer + + +

+
+
+
+ Full Usage: + init length1 length2 length3 length4 length5 initializer +
+
+ Parameters: +
    + + + length1 + + : + int + +
    + + + length2 + + : + int + +
    + + + length3 + + : + int + +
    + + + length4 + + : + int + +
    + + + length5 + + : + int + +
    + + + initializer + + : + int -> int -> int -> int -> int -> 'T + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + length1 + + : + int +
+
+
+ + length2 + + : + int +
+
+
+ + length3 + + : + int +
+
+
+ + length4 + + : + int +
+
+
+ + length5 + + : + int +
+
+
+ + initializer + + : + int -> int -> int -> int -> int -> 'T +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + length1 array + + +

+
+
+
+ Full Usage: + length1 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length2 array + + +

+
+
+
+ Full Usage: + length2 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length3 array + + +

+
+
+
+ Full Usage: + length3 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length4 array + + +

+
+
+
+ Full Usage: + length4 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length5 array + + +

+
+
+
+ Full Usage: + length5 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + map mapping array + + +

+
+
+
+ Full Usage: + map mapping array +
+
+ Parameters: +
    + + + mapping + + : + obj -> 'a + +
    + + + array + + : + Array + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + mapping + + : + obj -> 'a +
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + set array index1 index2 index3 index4 index5 value + + +

+
+
+
+ Full Usage: + set array index1 index2 index3 index4 index5 value +
+
+ Parameters: +
    + + + array + + : + Array + +
    + + + index1 + + : + int + +
    + + + index2 + + : + int + +
    + + + index3 + + : + int + +
    + + + index4 + + : + int + +
    + + + index5 + + : + int + +
    + + + value + + : + 'a + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+ + index1 + + : + int +
+
+
+ + index2 + + : + int +
+
+
+ + index3 + + : + int +
+
+
+ + index4 + + : + int +
+
+
+ + index5 + + : + int +
+
+
+ + value + + : + 'a +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-array6d.html b/reference/furnace-util-array6d.html new file mode 100644 index 00000000..d36fbd66 --- /dev/null +++ b/reference/furnace-util-array6d.html @@ -0,0 +1,1594 @@ + + + + + Array6D (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Array6D Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + create length1 length2 length3 length4 length5 length6 initial + + +

+
+
+
+ Full Usage: + create length1 length2 length3 length4 length5 length6 initial +
+
+ Parameters: +
    + + + length1 + + : + int + +
    + + + length2 + + : + int + +
    + + + length3 + + : + int + +
    + + + length4 + + : + int + +
    + + + length5 + + : + int + +
    + + + length6 + + : + int + +
    + + + initial + + : + 'T + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + length1 + + : + int +
+
+
+ + length2 + + : + int +
+
+
+ + length3 + + : + int +
+
+
+ + length4 + + : + int +
+
+
+ + length5 + + : + int +
+
+
+ + length6 + + : + int +
+
+
+ + initial + + : + 'T +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + get array index1 index2 index3 index4 index5 index6 + + +

+
+
+
+ Full Usage: + get array index1 index2 index3 index4 index5 index6 +
+
+ Parameters: +
    + + + array + + : + Array + +
    + + + index1 + + : + int + +
    + + + index2 + + : + int + +
    + + + index3 + + : + int + +
    + + + index4 + + : + int + +
    + + + index5 + + : + int + +
    + + + index6 + + : + int + +
    +
+
+ + Returns: + obj + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+ + index1 + + : + int +
+
+
+ + index2 + + : + int +
+
+
+ + index3 + + : + int +
+
+
+ + index4 + + : + int +
+
+
+ + index5 + + : + int +
+
+
+ + index6 + + : + int +
+
+
+
+
+ + Returns: + + obj +
+
+
+
+
+ +

+ + + init length1 length2 length3 length4 length5 length6 initializer + + +

+
+
+
+ Full Usage: + init length1 length2 length3 length4 length5 length6 initializer +
+
+ Parameters: +
    + + + length1 + + : + int + +
    + + + length2 + + : + int + +
    + + + length3 + + : + int + +
    + + + length4 + + : + int + +
    + + + length5 + + : + int + +
    + + + length6 + + : + int + +
    + + + initializer + + : + int -> int -> int -> int -> int -> int -> 'T + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + length1 + + : + int +
+
+
+ + length2 + + : + int +
+
+
+ + length3 + + : + int +
+
+
+ + length4 + + : + int +
+
+
+ + length5 + + : + int +
+
+
+ + length6 + + : + int +
+
+
+ + initializer + + : + int -> int -> int -> int -> int -> int -> 'T +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + length1 array + + +

+
+
+
+ Full Usage: + length1 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length2 array + + +

+
+
+
+ Full Usage: + length2 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length3 array + + +

+
+
+
+ Full Usage: + length3 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length4 array + + +

+
+
+
+ Full Usage: + length4 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length5 array + + +

+
+
+
+ Full Usage: + length5 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + length6 array + + +

+
+
+
+ Full Usage: + length6 array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + map mapping array + + +

+
+
+
+ Full Usage: + map mapping array +
+
+ Parameters: +
    + + + mapping + + : + obj -> 'a + +
    + + + array + + : + Array + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + mapping + + : + obj -> 'a +
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + set array index1 index2 index3 index4 index5 index6 value + + +

+
+
+
+ Full Usage: + set array index1 index2 index3 index4 index5 index6 value +
+
+ Parameters: +
    + + + array + + : + Array + +
    + + + index1 + + : + int + +
    + + + index2 + + : + int + +
    + + + index3 + + : + int + +
    + + + index4 + + : + int + +
    + + + index5 + + : + int + +
    + + + index6 + + : + int + +
    + + + value + + : + 'a + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+ + index1 + + : + int +
+
+
+ + index2 + + : + int +
+
+
+ + index3 + + : + int +
+
+
+ + index4 + + : + int +
+
+
+ + index5 + + : + int +
+
+
+ + index6 + + : + int +
+
+
+ + value + + : + 'a +
+
+
+
+
+ +

+ + + zeroCreate length1 length2 length3 length4 length5 length6 + + +

+
+
+
+ Full Usage: + zeroCreate length1 length2 length3 length4 length5 length6 +
+
+ Parameters: +
    + + + length1 + + : + int + +
    + + + length2 + + : + int + +
    + + + length3 + + : + int + +
    + + + length4 + + : + int + +
    + + + length5 + + : + int + +
    + + + length6 + + : + int + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + length1 + + : + int +
+
+
+ + length2 + + : + int +
+
+
+ + length3 + + : + int +
+
+
+ + length4 + + : + int +
+
+
+ + length5 + + : + int +
+
+
+ + length6 + + : + int +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-arraynd.html b/reference/furnace-util-arraynd.html new file mode 100644 index 00000000..cac3370f --- /dev/null +++ b/reference/furnace-util-arraynd.html @@ -0,0 +1,352 @@ + + + + + ArrayND (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ArrayND Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + init shape f + + +

+
+
+
+ Full Usage: + init shape f +
+
+ Parameters: +
    + + + shape + + : + int[] + +
    + + + f + + : + int[] -> 'T + +
    +
+
+ + Returns: + obj + +
+
+
+
+
+
+ + + + + + +

+ + Initializes an array with a given shape and initializer function. + +

+
+
+
+ + shape + + : + int[] +
+
+
+ + f + + : + int[] -> 'T +
+
+
+
+
+ + Returns: + + obj +
+
+
+
+
+ +

+ + + zeroCreate shape + + +

+
+
+
+ Full Usage: + zeroCreate shape +
+
+ Parameters: +
    + + + shape + + : + int[] + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ + Initializes an array with a given shape and initializer function. + +

+
+
+
+ + shape + + : + int[] +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-dataconverter.html b/reference/furnace-util-dataconverter.html new file mode 100644 index 00000000..9928a735 --- /dev/null +++ b/reference/furnace-util-dataconverter.html @@ -0,0 +1,1247 @@ + + + + + DataConverter (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ DataConverter Module +

+ +
+
+

+ + Contains operations relating to converting .NET data to tensor data. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + dataOfValues ofFloat32 ofFloat64 ofInt8 ofInt16 ofInt32 ofInt64 ofBool ofByte value + + +

+
+
+
+ Full Usage: + dataOfValues ofFloat32 ofFloat64 ofInt8 ofInt16 ofInt32 ofInt64 ofBool ofByte value +
+
+ Parameters: +
    + + + ofFloat32 + + : + float32 -> ^T + +
    + + + ofFloat64 + + : + double -> ^T + +
    + + + ofInt8 + + : + int8 -> ^T + +
    + + + ofInt16 + + : + int16 -> ^T + +
    + + + ofInt32 + + : + int32 -> ^T + +
    + + + ofInt64 + + : + int64 -> ^T + +
    + + + ofBool + + : + bool -> ^T + +
    + + + ofByte + + : + byte -> ^T + +
    + + + value + + : + obj + +
    +
+
+ + Returns: + ^T[] * int[] + +
+ Modifiers: + inline +
+ Type parameters: + ^T +
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ofFloat32 + + : + float32 -> ^T +
+
+
+ + ofFloat64 + + : + double -> ^T +
+
+
+ + ofInt8 + + : + int8 -> ^T +
+
+
+ + ofInt16 + + : + int16 -> ^T +
+
+
+ + ofInt32 + + : + int32 -> ^T +
+
+
+ + ofInt64 + + : + int64 -> ^T +
+
+
+ + ofBool + + : + bool -> ^T +
+
+
+ + ofByte + + : + byte -> ^T +
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + ^T[] * int[] +
+
+
+
+
+ +

+ + + dataOfValuesForBool value + + +

+
+
+
+ Full Usage: + dataOfValuesForBool value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + bool[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + bool[] * int[] +
+
+
+
+
+ +

+ + + dataOfValuesForByte value + + +

+
+
+
+ Full Usage: + dataOfValuesForByte value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + byte[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + byte[] * int[] +
+
+
+
+
+ +

+ + + dataOfValuesForFloat32 value + + +

+
+
+
+ Full Usage: + dataOfValuesForFloat32 value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + float32[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + float32[] * int[] +
+
+
+
+
+ +

+ + + dataOfValuesForFloat64 value + + +

+
+
+
+ Full Usage: + dataOfValuesForFloat64 value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + double[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + double[] * int[] +
+
+
+
+
+ +

+ + + dataOfValuesForInt16 value + + +

+
+
+
+ Full Usage: + dataOfValuesForInt16 value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + int16[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + int16[] * int[] +
+
+
+
+
+ +

+ + + dataOfValuesForInt32 value + + +

+
+
+
+ Full Usage: + dataOfValuesForInt32 value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + int32[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + int32[] * int[] +
+
+
+
+
+ +

+ + + dataOfValuesForInt64 value + + +

+
+
+
+ Full Usage: + dataOfValuesForInt64 value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + int64[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + int64[] * int[] +
+
+
+
+
+ +

+ + + dataOfValuesForInt8 value + + +

+
+
+
+ Full Usage: + dataOfValuesForInt8 value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + int8[] * int[] + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + int8[] * int[] +
+
+
+
+
+ +

+ + + formatType ty + + +

+
+
+
+ Full Usage: + formatType ty +
+
+ Parameters: +
    + + + ty + + : + Type + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ty + + : + Type +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + tryFlatArrayAndShape value + + +

+
+
+
+ Full Usage: + tryFlatArrayAndShape value +
+
+ Parameters: +
    + + + value + + : + obj + +
    +
+
+ + Returns: + ('T[] * int[]) option + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + obj +
+
+
+
+
+ + Returns: + + ('T[] * int[]) option +
+
+
+
+
+ +

+ + + typesMatch array + + +

+
+
+
+ Full Usage: + typesMatch array +
+
+ Parameters: +
    + + + array + + : + Array + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + array + + : + Array +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-dictionary.html b/reference/furnace-util-dictionary.html new file mode 100644 index 00000000..fd31c6c2 --- /dev/null +++ b/reference/furnace-util-dictionary.html @@ -0,0 +1,338 @@ + + + + + Dictionary (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Dictionary Module +

+ +
+
+

+ + Contains extensions related to .NET Dictionary. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + copyKeys dictionary + + +

+
+
+
+ Full Usage: + copyKeys dictionary +
+
+ Parameters: +
    + + + dictionary + + : + Dictionary<'Key, 'Value> + +
    +
+
+ + Returns: + 'Key[] + +
+
+
+
+
+
+ + + + + + +

+ + Gets a fresh array containing the keys of the dictionary. + +

+
+
+
+ + dictionary + + : + Dictionary<'Key, 'Value> +
+
+
+
+
+ + Returns: + + 'Key[] +
+
+
+
+
+ +

+ + + copyValues dictionary + + +

+
+
+
+ Full Usage: + copyValues dictionary +
+
+ Parameters: +
    + + + dictionary + + : + Dictionary<'Key, 'Value> + +
    +
+
+ + Returns: + 'Value[] + +
+
+
+
+
+
+ + + + + + +

+ + Gets a fresh array containing the values of the dictionary. + +

+
+
+
+ + dictionary + + : + Dictionary<'Key, 'Value> +
+
+
+
+
+ + Returns: + + 'Value[] +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-extensionautoopens.html b/reference/furnace-util-extensionautoopens.html new file mode 100644 index 00000000..4f3cd51e --- /dev/null +++ b/reference/furnace-util-extensionautoopens.html @@ -0,0 +1,643 @@ + + + + + ExtensionAutoOpens (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ ExtensionAutoOpens Module +

+ +
+
+

+ + Contains auto-opened extensions to the F# programming model. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + array3D data + + +

+
+
+
+ Full Usage: + array3D data +
+
+ Parameters: +
    + + + data + + : + seq<'a> + +
    +
+
+ + Returns: + 'c[,,] + +
+
+
+
+
+
+ + + + + + +

+ + Creates a non-jagged 3D array from jagged data. + +

+
+
+
+ + data + + : + seq<'a> +
+
+
+
+
+ + Returns: + + 'c[,,] +
+
+
+
+
+ +

+ + + array4D data + + +

+
+
+
+ Full Usage: + array4D data +
+
+ Parameters: +
    + + + data + + : + seq<'a> + +
    +
+
+ + Returns: + 'd[,,,] + +
+
+
+
+
+
+ + + + + + +

+ + Creates a non-jagged 4D array from jagged data. + +

+
+
+
+ + data + + : + seq<'a> +
+
+
+
+
+ + Returns: + + 'd[,,,] +
+
+
+
+
+ +

+ + + array5D data + + +

+
+
+
+ Full Usage: + array5D data +
+
+ Parameters: +
    + + + data + + : + seq<'a> + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + data + + : + seq<'a> +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + array6D data + + +

+
+
+
+ Full Usage: + array6D data +
+
+ Parameters: +
    + + + data + + : + seq<'a> + +
    +
+
+ + Returns: + Array + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + data + + : + seq<'a> +
+
+
+
+
+ + Returns: + + Array +
+
+
+
+
+ +

+ + + notNull value + + +

+
+
+
+ Full Usage: + notNull value +
+
+ Parameters: +
    + + + value + + : + 'a + +
    +
+
+ + Returns: + bool + +
+ Modifiers: + inline +
+
+
+
+
+
+ + + + + + +

+ + Indicates if a value is not null. + +

+
+
+
+ + value + + : + 'a +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + print x + + +

+
+
+
+ Full Usage: + print x +
+
+ Parameters: +
    + + + x + + : + 'a + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Print the given value to the console using the '%A' printf format specifier + +

+
+
+
+ + x + + : + 'a +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-globalnestinglevel.html b/reference/furnace-util-globalnestinglevel.html new file mode 100644 index 00000000..2b79011f --- /dev/null +++ b/reference/furnace-util-globalnestinglevel.html @@ -0,0 +1,474 @@ + + + + + GlobalNestingLevel (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ GlobalNestingLevel Type +

+ +
+
+

+ + Contains operations to get, set or reset the global nesting level for differentiation operations. + +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + GlobalNestingLevel() + + +

+
+
+
+ Full Usage: + GlobalNestingLevel() +
+
+ + Returns: + GlobalNestingLevel + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + GlobalNestingLevel +
+
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + GlobalNestingLevel.Current + + +

+
+
+
+ Full Usage: + GlobalNestingLevel.Current +
+
+ + Returns: + uint32 + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + uint32 +
+
+
+
+
+ +

+ + GlobalNestingLevel.Next() + + +

+
+
+
+ Full Usage: + GlobalNestingLevel.Next() +
+
+ + Returns: + uint32 + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + uint32 +
+
+
+
+
+ +

+ + + GlobalNestingLevel.Reset() + + +

+
+
+
+ Full Usage: + GlobalNestingLevel.Reset() +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ +

+ + + GlobalNestingLevel.Set(level) + + +

+
+
+
+ Full Usage: + GlobalNestingLevel.Set(level) +
+
+ Parameters: +
    + + + level + + : + uint32 + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + level + + : + uint32 +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-helpers.html b/reference/furnace-util-helpers.html new file mode 100644 index 00000000..695260ed --- /dev/null +++ b/reference/furnace-util-helpers.html @@ -0,0 +1,428 @@ + + + + + helpers (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ helpers Module +

+ +
+
+

+ +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + printVal x + + +

+
+
+
+ Full Usage: + printVal x +
+
+ Parameters: +
    + + + x + + : + scalar + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + x + + : + scalar +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + runScript executable lines timeoutMilliseconds + + +

+
+
+
+ Full Usage: + runScript executable lines timeoutMilliseconds +
+
+ Parameters: +
    + + + executable + + : + string + +
    + + + lines + + : + string[] + +
    + + + timeoutMilliseconds + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + executable + + : + string +
+
+
+ + lines + + : + string[] +
+
+
+ + timeoutMilliseconds + + : + int +
+
+
+
+
+ +

+ + + toPython v + + +

+
+
+
+ Full Usage: + toPython v +
+
+ Parameters: +
    + + + v + + : + obj + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + v + + : + obj +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-nestinglevel.html b/reference/furnace-util-nestinglevel.html new file mode 100644 index 00000000..fbea3de5 --- /dev/null +++ b/reference/furnace-util-nestinglevel.html @@ -0,0 +1,381 @@ + + + + + NestingLevel (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ NestingLevel Type +

+ +
+
+

+ + Represents a differentiation nesting level. + +

+
+
+
+
+
+
+
+

+ Record fields +

+ + + + + + + + + + + + + +
+ Record Field + + Description +
+
+ +

+ + + Current + + +

+
+
+
+ Full Usage: + Current +
+
+ + Field type: + uint32 + +
+ Modifiers: + mutable +
+
+
+
+
+
+ + + + +

+ +

+
+
+
+ + Field type: + + uint32 +
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + NestingLevel() + + +

+
+
+
+ Full Usage: + NestingLevel() +
+
+ + Returns: + NestingLevel + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + NestingLevel +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + this.Next + + +

+
+
+
+ Full Usage: + this.Next +
+
+ + Returns: + uint32 + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + uint32 +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-ordereddictionary.html b/reference/furnace-util-ordereddictionary.html new file mode 100644 index 00000000..8ca0e52b --- /dev/null +++ b/reference/furnace-util-ordereddictionary.html @@ -0,0 +1,258 @@ + + + + + OrderedDictionary (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ OrderedDictionary Module +

+ +
+
+

+ + Contains extensions related to .NET OrderedDictionary. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + copyKeys dictionary + + +

+
+
+
+ Full Usage: + copyKeys dictionary +
+
+ Parameters: + +
+ + Returns: + 'b[] + +
+
+
+
+
+
+ + + + + + +

+ + Gets a fresh array containing the keys of the dictionary. + +

+
+
+
+ + dictionary + + : + OrderedDictionary +
+
+
+
+
+ + Returns: + + 'b[] +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-pyplot.html b/reference/furnace-util-pyplot.html new file mode 100644 index 00000000..794b6fce --- /dev/null +++ b/reference/furnace-util-pyplot.html @@ -0,0 +1,1216 @@ + + + + + Pyplot (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Pyplot Type +

+ +
+
+

+ +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Pyplot(?pythonExecutable, ?timeoutMilliseconds) + + +

+
+
+
+ Full Usage: + Pyplot(?pythonExecutable, ?timeoutMilliseconds) +
+
+ Parameters: +
    + + + ?pythonExecutable + + : + string + +
    + + + ?timeoutMilliseconds + + : + int + +
    +
+
+ + Returns: + Pyplot + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?pythonExecutable + + : + string +
+
+
+ + ?timeoutMilliseconds + + : + int +
+
+
+
+
+ + Returns: + + Pyplot +
+
+
+
+
+
+

+ Instance members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Instance member + + Description +
+
+ +

+ + + this.addPython line + + +

+
+
+
+ Full Usage: + this.addPython line +
+
+ Parameters: +
    + + + line + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + line + + : + string +
+
+
+
+
+ +

+ + + this.figure ?figSize + + +

+
+
+
+ Full Usage: + this.figure ?figSize +
+
+ Parameters: +
    + + + ?figSize + + : + float * float + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + ?figSize + + : + float * float +
+
+
+
+
+ +

+ + + this.hist (x, ?weights, ?bins, ?density, ?label) + + +

+
+
+
+ Full Usage: + this.hist (x, ?weights, ?bins, ?density, ?label) +
+
+ Parameters: +
    + + + x + + : + Tensor + +
    + + + ?weights + + : + Tensor + +
    + + + ?bins + + : + int + +
    + + + ?density + + : + bool + +
    + + + ?label + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + x + + : + Tensor +
+
+
+ + ?weights + + : + Tensor +
+
+
+ + ?bins + + : + int +
+
+
+ + ?density + + : + bool +
+
+
+ + ?label + + : + string +
+
+
+
+
+ +

+ + + this.legend () + + +

+
+
+
+ Full Usage: + this.legend () +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ +

+ + + this.plot (y, ?alpha, ?label) + + +

+
+
+
+ Full Usage: + this.plot (y, ?alpha, ?label) +
+
+ Parameters: +
    + + + y + + : + Tensor + +
    + + + ?alpha + + : + float + +
    + + + ?label + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + y + + : + Tensor +
+
+
+ + ?alpha + + : + float +
+
+
+ + ?label + + : + string +
+
+
+
+
+ +

+ + + this.plot (x, y, ?alpha, ?label) + + +

+
+
+
+ Full Usage: + this.plot (x, y, ?alpha, ?label) +
+
+ Parameters: +
    + + + x + + : + Tensor + +
    + + + y + + : + Tensor + +
    + + + ?alpha + + : + float + +
    + + + ?label + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + x + + : + Tensor +
+
+
+ + y + + : + Tensor +
+
+
+ + ?alpha + + : + float +
+
+
+ + ?label + + : + string +
+
+
+
+
+ +

+ + + this.savefig fileName + + +

+
+
+
+ Full Usage: + this.savefig fileName +
+
+ Parameters: +
    + + + fileName + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + fileName + + : + string +
+
+
+
+
+ +

+ + + this.script + + +

+
+
+
+ Full Usage: + this.script +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + this.tightLayout () + + +

+
+
+
+ Full Usage: + this.tightLayout () +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ +

+ + + this.xlabel label + + +

+
+
+
+ Full Usage: + this.xlabel label +
+
+ Parameters: +
    + + + label + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + label + + : + string +
+
+
+
+
+ +

+ + + this.xscale value + + +

+
+
+
+ Full Usage: + this.xscale value +
+
+ Parameters: +
    + + + value + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + string +
+
+
+
+
+ +

+ + + this.ylabel label + + +

+
+
+
+ Full Usage: + this.ylabel label +
+
+ Parameters: +
    + + + label + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + label + + : + string +
+
+
+
+
+ +

+ + + this.yscale value + + +

+
+
+
+ Full Usage: + this.yscale value +
+
+ Parameters: +
    + + + value + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + value + + : + string +
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-random.html b/reference/furnace-util-random.html new file mode 100644 index 00000000..df340481 --- /dev/null +++ b/reference/furnace-util-random.html @@ -0,0 +1,1662 @@ + + + + + Random (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Random Type +

+ +
+
+

+ + Contains operations relating to pseudo-random number generation. + +

+
+
+
+
+
+
+
+
+
+

+ Constructors +

+ + + + + + + + + + + + + +
+ Constructor + + Description +
+
+ +

+ + + Random() + + +

+
+
+
+ Full Usage: + Random() +
+
+ + Returns: + Random + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + Returns: + + Random +
+
+
+
+
+
+
+

+ Static members +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Static member + + Description +
+
+ +

+ + + Random.Bernoulli() + + +

+
+
+
+ Full Usage: + Random.Bernoulli() +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Samples a random value from the Bernoulli distribution. + +

+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + Random.Bernoulli(prob) + + +

+
+
+
+ Full Usage: + Random.Bernoulli(prob) +
+
+ Parameters: +
    + + + prob + + : + float + +
    +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Samples a random value from the Bernoulli distribution with the given probability. + +

+
+
+
+ + prob + + : + float +
+
+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + Random.Choice(array, probs) + + +

+
+
+
+ Full Usage: + Random.Choice(array, probs) +
+
+ Parameters: +
    + + + array + + : + 'a[] + +
    + + + probs + + : + float[] + +
    +
+
+ + Returns: + 'a + +
+
+
+
+
+
+ + + + + + +

+ + Samples a value at random from the given array using the given categorical probabilities. + +

+
+
+
+ + array + + : + 'a[] +
+
+
+ + probs + + : + float[] +
+
+
+
+
+ + Returns: + + 'a +
+
+
+
+
+ +

+ + + Random.Choice(array) + + +

+
+
+
+ Full Usage: + Random.Choice(array) +
+
+ Parameters: +
    + + + array + + : + 'a[] + +
    +
+
+ + Returns: + 'a + +
+
+
+
+
+
+ + + + + + +

+ + Samples a value at random from the given array. + +

+
+
+
+ + array + + : + 'a[] +
+
+
+
+
+ + Returns: + + 'a +
+
+
+
+
+ +

+ + + Random.ChoiceIndex(probs) + + +

+
+
+
+ Full Usage: + Random.ChoiceIndex(probs) +
+
+ Parameters: +
    + + + probs + + : + float[] + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Samples an index at random with the given categorical probabilities. + +

+
+
+
+ + probs + + : + float[] +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + Random.Double(low, high) + + +

+
+
+
+ Full Usage: + Random.Double(low, high) +
+
+ Parameters: +
    + + + low + + : + float + +
    + + + high + + : + float + +
    +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Samples a double value in the given range [low, high) + +

+
+
+
+ + low + + : + float +
+
+
+ + high + + : + float +
+
+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + Random.Double() + + +

+
+
+
+ Full Usage: + Random.Double() +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Samples a double value in the range [0, 1) + +

+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + Random.Integer(low, high) + + +

+
+
+
+ Full Usage: + Random.Integer(low, high) +
+
+ Parameters: +
    + + + low + + : + int + +
    + + + high + + : + int + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Samples a random integer in the given range [low, high). + +

+
+
+
+ + low + + : + int +
+
+
+ + high + + : + int +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + Random.Integer() + + +

+
+
+
+ Full Usage: + Random.Integer() +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Samples a non-negative random integer + +

+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + Random.Multinomial(probs, numSamples) + + +

+
+
+
+ Full Usage: + Random.Multinomial(probs, numSamples) +
+
+ Parameters: +
    + + + probs + + : + float[,] + +
    + + + numSamples + + : + int + +
    +
+
+ + Returns: + int[,] + +
+
+
+
+
+
+ + + + + + +

+ + Returns a 2D array where each row contains `numSamples` indices sampled from the multinomial probability distribution defined by the probabilities in the corresponding row of the `probs` array. + +

+
+
+
+ + probs + + : + float[,] +
+
+
+ + numSamples + + : + int +
+
+
+
+
+ + Returns: + + int[,] +
+
+
+
+
+ +

+ + + Random.Multinomial(probs, numSamples) + + +

+
+
+
+ Full Usage: + Random.Multinomial(probs, numSamples) +
+
+ Parameters: +
    + + + probs + + : + float[] + +
    + + + numSamples + + : + int + +
    +
+
+ + Returns: + int[] + +
+
+
+
+
+
+ + + + + + +

+ + Samples a number of random values array of random values for the given weighted distribution + +

+
+
+
+ + probs + + : + float[] +
+
+
+ + numSamples + + : + int +
+
+
+
+
+ + Returns: + + int[] +
+
+
+
+
+ +

+ + + Random.Normal(mean, stddev) + + +

+
+
+
+ Full Usage: + Random.Normal(mean, stddev) +
+
+ Parameters: +
    + + + mean + + : + float + +
    + + + stddev + + : + float + +
    +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Samples a random value from the normal distribution with the given mean and standard deviation. + +

+
+
+
+ + mean + + : + float +
+
+
+ + stddev + + : + float +
+
+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + Random.Normal() + + +

+
+
+
+ Full Usage: + Random.Normal() +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Samples a random value from the standard normal distribution with mean 0 and standard deviation 1. + +

+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + Random.Seed(seed) + + +

+
+
+
+ Full Usage: + Random.Seed(seed) +
+
+ Parameters: +
    + + + seed + + : + int + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Sets the random seed. + +

+
+
+
+ + seed + + : + int +
+
+
+
+
+ +

+ + + Random.Shuffle(array) + + +

+
+
+
+ Full Usage: + Random.Shuffle(array) +
+
+ Parameters: +
    + + + array + + : + 'a[] + +
    +
+
+ + Returns: + 'a[] + +
+
+
+
+
+
+ + + + + + +

+ + Returns an array that is a randomly-shuffled version of the given array, using the Durstenfeld/Knuth shuffle. + +

+
+
+
+ + array + + : + 'a[] +
+
+
+
+
+ + Returns: + + 'a[] +
+
+
+
+
+ +

+ + + Random.UUID() + + +

+
+
+
+ Full Usage: + Random.UUID() +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Returns a universally unique identifier (UUID) string + +

+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + Random.Uniform(low, high) + + +

+
+
+
+ Full Usage: + Random.Uniform(low, high) +
+
+ Parameters: +
    + + + low + + : + float + +
    + + + high + + : + float + +
    +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Samples a random value from the uniform distribution with the given parameters [low, high). + +

+
+
+
+ + low + + : + float +
+
+
+ + high + + : + float +
+
+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + Random.Uniform() + + +

+
+
+
+ Full Usage: + Random.Uniform() +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Samples a random value from the standard uniform distribution over the interval [0,1). + +

+
+
+
+ + Returns: + + float +
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-randommodule.html b/reference/furnace-util-randommodule.html new file mode 100644 index 00000000..afc455cf --- /dev/null +++ b/reference/furnace-util-randommodule.html @@ -0,0 +1,258 @@ + + + + + Random (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Random Module +

+ +
+
+

+ + Contains operations relating to pseudo-random number generation. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + shuffledIndices length + + +

+
+
+
+ Full Usage: + shuffledIndices length +
+
+ Parameters: +
    + + + length + + : + int + +
    +
+
+ + Returns: + int -> int + +
+
+
+
+
+
+ + + + + + +

+ + Returns a function that maps a given index to a shuffled version of the indexes up to the given `length` + +

+
+
+
+ + length + + : + int +
+
+
+
+
+ + Returns: + + int -> int +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-seq.html b/reference/furnace-util-seq.html new file mode 100644 index 00000000..adebc6c7 --- /dev/null +++ b/reference/furnace-util-seq.html @@ -0,0 +1,663 @@ + + + + + Seq (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Seq Module +

+ +
+
+

+ + Contains extensions to the F# Seq module. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + allEqual items + + +

+
+
+
+ Full Usage: + allEqual items +
+
+ Parameters: +
    + + + items + + : + seq<'T> + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if all elements of the sequence are equal. + +

+
+
+
+ + items + + : + seq<'T> +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + duplicates l + + +

+
+
+
+ Full Usage: + duplicates l +
+
+ Parameters: +
    + + + l + + : + seq<'a> + +
    +
+
+ + Returns: + 'a list + +
+
+
+
+
+
+ + + + + + +

+ + Gets the duplicate elements in the sequence. + +

+
+
+
+ + l + + : + seq<'a> +
+
+
+
+
+ + Returns: + + 'a list +
+
+
+
+
+ +

+ + + hasDuplicates l + + +

+
+
+
+ Full Usage: + hasDuplicates l +
+
+ Parameters: +
    + + + l + + : + seq<'s> + +
    +
+
+ + Returns: + bool + +
+
+
+
+
+
+ + + + + + +

+ + Indicates if a sequence has duplicate elements. + +

+
+
+
+ + l + + : + seq<'s> +
+
+
+
+
+ + Returns: + + bool +
+
+
+
+
+ +

+ + + maxIndex seq + + +

+
+
+
+ Full Usage: + maxIndex seq +
+
+ Parameters: +
    + + + seq + + : + seq<'a> + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Gets the index of the maximum element of the sequence. + +

+
+
+
+ + seq + + : + seq<'a> +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + minIndex seq + + +

+
+
+
+ Full Usage: + minIndex seq +
+
+ Parameters: +
    + + + seq + + : + seq<'a> + +
    +
+
+ + Returns: + int + +
+
+
+
+
+
+ + + + + + +

+ + Gets the index of the minimum element of the sequence. + +

+
+
+
+ + seq + + : + seq<'a> +
+
+
+
+
+ + Returns: + + int +
+
+
+
+
+ +

+ + + toArrayQuick xs + + +

+
+
+
+ Full Usage: + toArrayQuick xs +
+
+ Parameters: +
    + + + xs + + : + seq<'T> + +
    +
+
+ + Returns: + 'T[] + +
+ Modifiers: + inline +
+ Type parameters: + 'T +
+
+
+
+
+ + + + + + +

+ + Like Seq.toArray but does not clone the array if the input is already an array + +

+
+
+
+ + xs + + : + seq<'T> +
+
+
+
+
+ + Returns: + + 'T[] +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util-utilautoopens.html b/reference/furnace-util-utilautoopens.html new file mode 100644 index 00000000..6bff8bc6 --- /dev/null +++ b/reference/furnace-util-utilautoopens.html @@ -0,0 +1,1398 @@ + + + + + UtilAutoOpens (Furnace) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ UtilAutoOpens Module +

+ +
+
+

+ + Contains auto-opened utilities related to the Furnace programming model. + +

+
+
+
+

+ Functions and values +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Function or value + + Description +
+
+ +

+ + + !r + + +

+
+
+
+ Full Usage: + !r +
+
+ Parameters: +
    + + + r + + : + 'T ref + +
    +
+
+ + Returns: + 'T + +
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + r + + : + 'T ref +
+
+
+
+
+ + Returns: + + 'T +
+
+
+
+
+ +

+ + + r := v + + +

+
+
+
+ Full Usage: + r := v +
+
+ Parameters: +
    + + + r + + : + 'T ref + +
    + + + v + + : + 'T + +
    +
+
+
+
+
+
+
+ + + + + + +

+ +

+
+
+
+ + r + + : + 'T ref +
+
+
+ + v + + : + 'T +
+
+
+
+
+ +

+ + + bytesReadable i + + +

+
+
+
+ Full Usage: + bytesReadable i +
+
+ Parameters: +
    + + + i + + : + int64 + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Return a human-readable string representation of the given value in Bytes. + +

+
+
+
+ + i + + : + int64 +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + fileToBase64String fileName + + +

+
+
+
+ Full Usage: + fileToBase64String fileName +
+
+ Parameters: +
    + + + fileName + + : + string + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Returns the file contents as Base64 encoded string + +

+
+
+
+ + fileName + + : + string +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + indentNewLines str numSpaces + + +

+
+
+
+ Full Usage: + indentNewLines str numSpaces +
+
+ Parameters: +
    + + + str + + : + String + +
    + + + numSpaces + + : + int + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Indents all lines of the given string by the given number of spaces. + +

+
+
+
+ + str + + : + String +
+
+
+ + numSpaces + + : + int +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + loadBinary fileName + + +

+
+
+
+ Full Usage: + loadBinary fileName +
+
+ Parameters: +
    + + + fileName + + : + string + +
    +
+
+ + Returns: + 'T + +
+
+
+
+
+
+ + + + + + +

+ + Loads the given value from the given local file using binary serialization. + +

+
+
+
+ + fileName + + : + string +
+
+
+
+
+ + Returns: + + 'T +
+
+
+
+
+ +

+ + + log10Val + + +

+
+
+
+ Full Usage: + log10Val +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Value of log(10). + +

+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + logSqrt2Pi + + +

+
+
+
+ Full Usage: + logSqrt2Pi +
+
+ + Returns: + float + +
+
+
+
+
+
+ + + + + + +

+ + Value of log(sqrt(2*Math.PI)). + +

+
+
+
+ + Returns: + + float +
+
+
+
+
+ +

+ + + memoize fn + + +

+
+
+
+ Full Usage: + memoize fn +
+
+ Parameters: +
    + + + fn + + : + 'a -> 'b + +
    +
+
+ + Returns: + 'a -> 'b + +
+
+
+
+
+
+ + + + + + +

+ + Returns a function that memoizes the given function using a lookaside table. + +

+
+
+
+ + fn + + : + 'a -> 'b +
+
+
+
+
+ + Returns: + + 'a -> 'b +
+
+
+
+
+ +

+ + + pngToHtml fileName widthPixels + + +

+
+
+
+ Full Usage: + pngToHtml fileName widthPixels +
+
+ Parameters: +
    + + + fileName + + : + string + +
    + + + widthPixels + + : + int + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Given a PNG image file name, returns an HTML image element with the image content included as a Base64 encoded string + +

+
+
+
+ + fileName + + : + string +
+
+
+ + widthPixels + + : + int +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + saveBinary object fileName + + +

+
+
+
+ Full Usage: + saveBinary object fileName +
+
+ Parameters: +
    + + + object + + : + 'T + +
    + + + fileName + + : + string + +
    +
+
+
+
+
+
+
+ + + + + + +

+ + Saves the given value to the given local file using binary serialization. + +

+
+
+
+ + object + + : + 'T +
+
+
+ + fileName + + : + string +
+
+
+
+
+ +

+ + + stringPad s width + + +

+
+
+
+ Full Usage: + stringPad s width +
+
+ Parameters: +
    + + + s + + : + string + +
    + + + width + + : + int + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Left-pads a string up to the given length. + +

+
+
+
+ + s + + : + string +
+
+
+ + width + + : + int +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + stringPadAs s1 s2 + + +

+
+
+
+ Full Usage: + stringPadAs s1 s2 +
+
+ Parameters: +
    + + + s1 + + : + string + +
    + + + s2 + + : + string + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Left-pads a string to match the length of another string. + +

+
+
+
+ + s1 + + : + string +
+
+
+ + s2 + + : + string +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + thousandsFloat x + + +

+
+
+
+ Full Usage: + thousandsFloat x +
+
+ Parameters: +
    + + + x + + : + float + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Formats an integer as a string with comma as thousands separator + +

+
+
+
+ + x + + : + float +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+ +

+ + + thousandsInt x + + +

+
+
+
+ Full Usage: + thousandsInt x +
+
+ Parameters: +
    + + + x + + : + int + +
    +
+
+ + Returns: + string + +
+
+
+
+
+
+ + + + + + +

+ + Formats an integer as a string with comma as thousands separator + +

+
+
+
+ + x + + : + int +
+
+
+
+
+ + Returns: + + string +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace-util.html b/reference/furnace-util.html new file mode 100644 index 00000000..ce2da11a --- /dev/null +++ b/reference/furnace-util.html @@ -0,0 +1,682 @@ + + + + + Furnace.Util + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Furnace.Util Namespace +

+
+

+ Contains utilities and library extensions related to the Furnace programming model. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type/Module + + Description +
+

+ + + Array + + +

+
+
+ + + + + + +

+ + Contains extensions to the F# Array module. + +

+
+
+

+ + + Array4D + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Array5D + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Array6D + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + ArrayND + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + DataConverter + + +

+
+
+ + + + + + +

+ + Contains operations relating to converting .NET data to tensor data. + +

+
+
+

+ + + Dictionary + + +

+
+
+ + + + + + +

+ + Contains extensions related to .NET Dictionary. + +

+
+
+

+ + + ExtensionAutoOpens + + +

+
+
+ + + + + + +

+ + Contains auto-opened extensions to the F# programming model. + +

+
+
+

+ + + GlobalNestingLevel + + +

+
+
+ + + + + + +

+ + Contains operations to get, set or reset the global nesting level for differentiation operations. + +

+
+
+

+ + + helpers + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + NestingLevel + + +

+
+
+ + + + + + +

+ + Represents a differentiation nesting level. + +

+
+
+

+ + + OrderedDictionary + + +

+
+
+ + + + + + +

+ + Contains extensions related to .NET OrderedDictionary. + +

+
+
+

+ + + Pyplot + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Random (Module) + + +

+
+
+ + + + + + +

+ + Contains operations relating to pseudo-random number generation. + +

+
+
+

+ + + Random (Type) + + +

+
+
+ + + + + + +

+ + Contains operations relating to pseudo-random number generation. + +

+
+
+

+ + + Seq + + +

+
+
+ + + + + + +

+ + Contains extensions to the F# Seq module. + +

+
+
+

+ + + UtilAutoOpens + + +

+
+
+ + + + + + +

+ + Contains auto-opened utilities related to the Furnace programming model. + +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/furnace.html b/reference/furnace.html new file mode 100644 index 00000000..dda9618b --- /dev/null +++ b/reference/furnace.html @@ -0,0 +1,1258 @@ + + + + + Furnace + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ Furnace Namespace +

+
+

+ Contains fundamental types for the tensor programming model, including Tensor, Shape and FurnaceImage. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type/Module + + Description +
+

+ + + Backend (Module) + + +

+
+
+ + + + + + +

+ + Contains functions and settings related to backend specifications. + +

+
+
+

+ + + Backend (Type) + + +

+
+
+ + + + + + +

+ + Represents a backend for Furnace tensors + +

+
+
+

+ + + BackendFunctionality<'T> + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + BinaryOp + + +

+
+
+ + + + + + +

+ Defines a new op implementing a binary function and its derivatives. Instances of this class are used with the Tensor.Op method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. +

+
+
+

+ + + BinaryOpElementwise + + +

+
+
+ + + + + + +

+ Defines a new op implementing an elementwise binary function and its derivatives. Instances of this class are used with the Tensor.Op method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. +

+
+
+

+ + + Compose + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Device (Module) + + +

+
+
+ + + + + + +

+ + Contains functions and settings related to device specifications. + +

+
+
+

+ + + Device (Type) + + +

+
+
+ + + + + + +

+ + Represents a device specification. + +

+
+
+

+ + + DeviceType + + +

+
+
+ + + + + + +

+ + Represents the type of a device. + +

+
+
+

+ + + Dtype (Module) + + +

+
+
+ + + + + + +

+ + Contains functions and settings related to tensor element types + +

+
+
+

+ + + Dtype (Type) + + +

+
+
+ + + + + + +

+ + Represents a storage type for elements of a tensor + +

+
+
+

+ + + DtypeAutoOpens + + +

+
+
+ + + + + + +

+ + Contains global functions and settings related to tensor element types, used when writing backends. + +

+
+
+

+ + + FurnaceImage + + +

+
+
+ + + + + + +

+ + Tensor operations + +

+
+
+

+ + + ImageExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + ImageUtil + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Numerical + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + OpAvgPoolExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + OpBMMExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + OpDetExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + OpInvExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + OpNormExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + OpOuterExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + OpSolveExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Printer (Module) + + +

+
+
+ + + + + + +

+ + Contains functions and settings related to print options. + +

+
+
+

+ + + Printer (Type) + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + scalar + + +

+
+
+ + + + + + +

+ + Represents a scalar on the Furnace programming model + +

+
+
+

+ + + ScalarExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Shape (Module) + + +

+
+
+ + + + + + +

+ + Contains functions and values related to tensor shapes. + +

+
+
+

+ + + Shape (Type) + + +

+
+
+ + + + + + +

+ + Represents the shape of a tensor. + +

+
+
+

+ + + ShapeAutoOpens + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Shorten + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + SlicingExtensions + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + Tensor + + +

+
+
+ + + + + + +

+ + Represents a multi-dimensional data type containing elements of a single data type. + +

+
+
+

+ + + TensorOp + + +

+
+
+ + + + + + +

+ +

+
+
+

+ + + UnaryOp + + +

+
+
+ + + + + + +

+ Defines a new op implementing a unary function and its derivatives. Instances of this class are used with the Tensor.Op method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. +

+
+
+

+ + + UnaryOpElementwise + + +

+
+
+ + + + + + +

+ Defines a new op implementing an elementwise unary function and its derivatives. Instances of this class are used with the Tensor.Op method to define a new differentiable tensor function that supports forward, reverse, and nested differentiation. +

+
+
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/index.html b/reference/index.html new file mode 100644 index 00000000..fddef02d --- /dev/null +++ b/reference/index.html @@ -0,0 +1,218 @@ + + + + + Furnace (API Reference) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+
+

+ API Reference +

+

+ Available Namespaces: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Namespace + + Description +
+ + Furnace + + + Contains fundamental types for the tensor programming model, including Tensor, Shape and FurnaceImage. +
+ + Furnace.Backends + + + Contains types and functionality related to backend implementations for Furnace. +
+ + Furnace.Data + + + Contains datasets and components related to data loading. +
+ + Furnace.Distributions + + + Contains types and functionality related to probabilitity distributions. +
+ + Furnace.Model + + + Contains types and functionality related to describing models. +
+ + Furnace.Optim + + + Contains types and functionality related to optimizing models and functions. +
+ + Furnace.Util + + + Contains utilities and library extensions related to the Furnace programming model. +
+
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tensors.fsx b/tensors.fsx new file mode 100644 index 00000000..f39683c6 --- /dev/null +++ b/tensors.fsx @@ -0,0 +1,37 @@ +#r "nuget: Furnace-lite,1.0.8" +(** +[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/tensors.ipynb)  +[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath=tensors.ipynb)  +[![Script](img/badge-script.svg)](tensors.fsx)  +[![Script](img/badge-notebook.svg)](tensors.ipynb) + +* The [FurnaceImage](https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html) API + + +* The [Tensor](https://fsprojects.github.io/Furnace/reference/furnace-tensor.html) type + + +Saving tensors as image and loading images as tensors + +## Converting between Tensors and arrays + +System.Array and F# arrays + +*) +open Furnace + +// Tensor +let t1 = FurnaceImage.tensor [ 0.0 .. 0.2 .. 1.0 ] + +// System.Array +let a1 = t1.toArray() + +// [] +let a1b = t1.toArray() :?> float32[] + +// Tensor +let t2 = FurnaceImage.randn([3;3;3]) + +// [,,] +let a2 = t2.toArray() :?> float32[,,] + diff --git a/tensors.html b/tensors.html new file mode 100644 index 00000000..9b35a78f --- /dev/null +++ b/tensors.html @@ -0,0 +1,204 @@ + + + + + tensors + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+

Binder  +Binder  +Script  +Script

+ +

Saving tensors as image and loading images as tensors

+

Converting between Tensors and arrays

+

System.Array and F# arrays

+
open Furnace
+
+// Tensor
+let t1 = FurnaceImage.tensor [ 0.0 .. 0.2 .. 1.0 ]
+
+// System.Array
+let a1 = t1.toArray()
+
+// []<float32>
+let a1b = t1.toArray() :?> float32[]
+
+// Tensor
+let t2 = FurnaceImage.randn([3;3;3])
+
+// [,,]<float32>
+let a2 = t2.toArray() :?> float32[,,]
+
+ +
namespace Furnace
+
type FurnaceImage = + static member abs: input: Tensor -> Tensor + static member acos: input: Tensor -> Tensor + static member add: a: Tensor * b: Tensor -> Tensor + static member arange: endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member arangeLike: input: Tensor * endVal: float * ?startVal: float * ?step: float * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor + 1 overload + static member argmax: input: Tensor -> int[] + 1 overload + static member argmin: input: Tensor -> int[] + 1 overload + static member asin: input: Tensor -> Tensor + static member atan: input: Tensor -> Tensor + static member backends: unit -> Backend list + ...
<summary> + Tensor operations +</summary>
+
static member Furnace.FurnaceImage.config: unit -> Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer
static member Furnace.FurnaceImage.config: configuration: (Furnace.Device * Furnace.Dtype * Furnace.Backend * Furnace.Printer) -> unit
static member Furnace.FurnaceImage.config: ?device: Furnace.Device * ?dtype: Furnace.Dtype * ?backend: Furnace.Backend * ?printer: Furnace.Printer -> unit
+
Multiple items
module Backend + +from Furnace
<summary> + Contains functions and settings related to backend specifications. +</summary>

--------------------
type Backend = + | Reference + | Torch + | Other of name: string * code: int + override ToString: unit -> string + member Name: string
<summary> + Represents a backend for Furnace tensors +</summary>
+
union case Furnace.Backend.Reference: Furnace.Backend
<summary> + The reference backend +</summary>
+
static member Furnace.FurnaceImage.seed: ?seed: int -> unit
+
val t1: Tensor
+
static member FurnaceImage.tensor: value: obj * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor
+
val a1: System.Array
+
val a1b: float32[]
+
Multiple items
val float32: value: 'T -> float32 (requires member op_Explicit)
<summary>Converts the argument to 32-bit float. This is a direct conversion for all + primitive numeric types. For strings, the input is converted using <c>Single.Parse()</c> + with InvariantCulture settings. Otherwise the operation requires an appropriate + static conversion method on the input type.</summary>
<param name="value">The input value.</param>
<returns>The converted float32</returns>
<example id="float32-example"><code lang="fsharp"></code></example>


--------------------
[<Struct>] +type float32 = System.Single
<summary>An abbreviation for the CLI type <see cref="T:System.Single" />.</summary>
<category>Basic Types</category>


--------------------
type float32<'Measure> = + float32
<summary>The type of single-precision floating point numbers, annotated with a unit of measure. + The unit of measure is erased in compiled code and when values of this type + are analyzed using reflection. The type is representationally equivalent to + <see cref="T:System.Single" />. + </summary>
<category>Basic Types with Units of Measure</category>
+
val t2: Tensor
+
static member FurnaceImage.randn: length: int * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor
static member FurnaceImage.randn: shape: seq<int> * ?device: Device * ?dtype: Dtype * ?backend: Backend -> Tensor
+
val a2: float32[,,]
+ +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tensors.ipynb b/tensors.ipynb new file mode 100644 index 00000000..2f9a014f --- /dev/null +++ b/tensors.ipynb @@ -0,0 +1,82 @@ + + { + "cells": [ + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Google Colab only: uncomment and run the following to install dotnet and the F# kernel\n", +"// !bash \u003c(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": null, "outputs": [], + "source": ["// Import Furnace package\n", +"#r \"nuget: Furnace-lite,1.0.8\"\n", +"\n", +"// Set dotnet interactive formatter to plaintext\n", +"Formatter.SetPreferredMimeTypesFor(typeof\u003cobj\u003e, \"text/plain\")\n", +"Formatter.Register(fun (x:obj) (writer: TextWriter) -\u003e fprintfn writer \"%120A\" x )\n"] + } +, + { + "cell_type": "markdown", + "metadata": {}, + + "source": ["[![Binder](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/tensors.ipynb)\u0026emsp;\n", +"[![Binder](img/badge-binder.svg)](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath=tensors.ipynb)\u0026emsp;\n", +"[![Script](img/badge-script.svg)](tensors.fsx)\u0026emsp;\n", +"[![Script](img/badge-notebook.svg)](tensors.ipynb)\n", +"\n", +"* The [FurnaceImage](https://fsprojects.github.io/Furnace/reference/furnace-furnaceimage.html) API\n", +" \n", +"\n", +"* The [Tensor](https://fsprojects.github.io/Furnace/reference/furnace-tensor.html) type\n", +" \n", +"\n", +"Saving tensors as image and loading images as tensors\n", +"\n", +"## Converting between Tensors and arrays\n", +"\n", +"System.Array and F# arrays\n", +"\n"] + } +, + { + "cell_type": "code", + "metadata": {}, + "execution_count": 2, "outputs": [], + "source": ["open Furnace\n", +"\n", +"// Tensor\n", +"let t1 = FurnaceImage.tensor [ 0.0 .. 
0.2 .. 1.0 ]\n", +"\n", +"// System.Array\n", +"let a1 = t1.toArray()\n", +"\n", +"// []\u003cfloat32\u003e\n", +"let a1b = t1.toArray() :?\u003e float32[]\n", +"\n", +"// Tensor\n", +"let t2 = FurnaceImage.randn([3;3;3])\n", +"\n", +"// [,,]\u003cfloat32\u003e\n", +"let a2 = t2.toArray() :?\u003e float32[,,]\n"] + }], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/test.png b/test.png new file mode 100644 index 00000000..ddb2639a Binary files /dev/null and b/test.png differ diff --git a/tutorial-classifier.fsx b/tutorial-classifier.fsx new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/tutorial-classifier.fsx @@ -0,0 +1,2 @@ + + diff --git a/tutorial-classifier.html b/tutorial-classifier.html new file mode 100644 index 00000000..9afd907a --- /dev/null +++ b/tutorial-classifier.html @@ -0,0 +1,127 @@ + + + + + tutorial-classifier + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial-classifier.ipynb b/tutorial-classifier.ipynb new file mode 100644 index 00000000..08fd20d5 --- /dev/null +++ b/tutorial-classifier.ipynb @@ -0,0 +1,18 @@ + + { + "cells": [], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/tutorial-gan.fsx b/tutorial-gan.fsx new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/tutorial-gan.fsx @@ -0,0 +1,2 @@ + + diff --git a/tutorial-gan.html b/tutorial-gan.html new file mode 100644 index 00000000..77f42d85 --- /dev/null +++ b/tutorial-gan.html @@ -0,0 +1,127 @@ + + + + + tutorial-gan + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial-gan.ipynb b/tutorial-gan.ipynb new file mode 100644 index 00000000..08fd20d5 --- /dev/null +++ b/tutorial-gan.ipynb @@ -0,0 +1,18 @@ + + { + "cells": [], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/tutorial-language.fsx b/tutorial-language.fsx new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/tutorial-language.fsx @@ -0,0 +1,2 @@ + + diff --git a/tutorial-language.html b/tutorial-language.html new file mode 100644 index 00000000..a2d86941 --- /dev/null +++ b/tutorial-language.html @@ -0,0 +1,127 @@ + + + + + tutorial-language + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial-language.ipynb b/tutorial-language.ipynb new file mode 100644 index 00000000..08fd20d5 --- /dev/null +++ b/tutorial-language.ipynb @@ -0,0 +1,18 @@ + + { + "cells": [], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + + diff --git a/tutorial-vae.fsx b/tutorial-vae.fsx new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/tutorial-vae.fsx @@ -0,0 +1,2 @@ + + diff --git a/tutorial-vae.html b/tutorial-vae.html new file mode 100644 index 00000000..65b4e94f --- /dev/null +++ b/tutorial-vae.html @@ -0,0 +1,127 @@ + + + + + tutorial-vae + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+
+
+ + + + + + + +
+
+ + +
+
+
+
+
+
+

© Copyright 2025, Furnace Contributors.

+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial-vae.ipynb b/tutorial-vae.ipynb new file mode 100644 index 00000000..08fd20d5 --- /dev/null +++ b/tutorial-vae.ipynb @@ -0,0 +1,18 @@ + + { + "cells": [], + "metadata": { + "kernelspec": {"display_name": ".NET (F#)", "language": "F#", "name": ".net-fsharp"}, + "langauge_info": { + "file_extension": ".fs", + "mimetype": "text/x-fsharp", + "name": "C#", + "pygments_lexer": "fsharp", + "version": "4.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } + +