Commit cc1a719

Mark the backend as WIP

nathanielsimard committed Apr 29, 2024

1 parent afb20b1 commit cc1a719
Showing 5 changed files with 8 additions and 18 deletions.
2 changes: 0 additions & 2 deletions backend-comparison/Cargo.toml
@@ -24,8 +24,6 @@ tch-cpu = ["burn/tch"]
tch-gpu = ["burn/tch"]
wgpu = ["burn/wgpu", "burn/autotune"]
wgpu-fusion = ["wgpu", "burn/fusion"]
cuda = ["burn/cuda", "burn/autotune"]
cuda-fusion = ["cuda", "burn/fusion"]

[dependencies]
arboard = { workspace = true }
7 changes: 0 additions & 7 deletions backend-comparison/src/lib.rs
@@ -70,13 +70,6 @@ macro_rules! bench_on_backend {
);
}

#[cfg(feature = "cuda")]
{
use burn::backend::Cuda;

bench::<Cuda>(&Default::default(), url, token);
}

#[cfg(feature = "tch-gpu")]
{
use burn::backend::{libtorch::LibTorchDevice, LibTorch};
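The hunk above and the Cargo.toml change before it remove the two halves of the same compile-time switch: the `cuda`/`cuda-fusion` features and the `#[cfg(feature = "cuda")]` block that consumed them. As a rough, self-contained sketch of that pattern (the trait, the backend type, and `bench` below are illustrative stand-ins rather than the real backend-comparison API, and it assumes a `wgpu = []` feature declared in the example's own Cargo.toml):

// Illustrative stand-ins for the real backend types and bench entry point.
trait Backend {
    fn name() -> &'static str;
}

struct Wgpu;

impl Backend for Wgpu {
    fn name() -> &'static str {
        "wgpu"
    }
}

fn bench<B: Backend>() {
    println!("running benchmarks on {}", B::name());
}

fn main() {
    // Compiled in only when built with `--features wgpu`; the removed cuda
    // block worked the same way and simply disappears without the feature.
    #[cfg(feature = "wgpu")]
    bench::<Wgpu>();
}

Built without the feature, the gated call is compiled out entirely, which is why marking the CUDA backend as WIP only requires deleting the feature definitions and the block they guarded.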
10 changes: 2 additions & 8 deletions crates/burn-core/Cargo.toml
@@ -20,7 +20,6 @@ default = [
"burn-tch?/default",
"burn-tensor/default",
"burn-wgpu?/default",
"burn-cuda?/default",
"burn-autodiff?/default",
]
std = [
@@ -31,7 +30,6 @@ std = [
"burn-ndarray?/std",
"burn-tensor/std",
"burn-wgpu?/std",
"burn-cuda?/std",
"flate2",
"half/std",
"log",
@@ -60,7 +58,6 @@ doc = [
"burn-tch/doc",
"burn-tensor/doc",
"burn-wgpu/doc",
"burn-cuda/doc",
]
dataset = ["burn-dataset"]
network = ["burn-common/network"]
@@ -72,23 +69,21 @@ wasm-sync = ["burn-tensor/wasm-sync", "burn-common/wasm-sync"]

# Backend
autodiff = ["burn-autodiff"]
fusion = ["burn-wgpu?/fusion", "burn-cuda?/fusion"]
fusion = ["burn-wgpu?/fusion"]

## Backend features
candle-cuda = ["burn-candle?/cuda"]
metal = ["burn-candle?/metal"]
accelerate = ["burn-candle?/accelerate", "burn-ndarray?/blas-accelerate"]
openblas = ["burn-ndarray?/blas-openblas"]
openblas-system = ["burn-ndarray?/blas-openblas-system"]
blas-netlib = ["burn-ndarray?/blas-netlib"]
autotune = ["burn-wgpu?/autotune", "burn-cuda?/autotune"]
autotune = ["burn-wgpu?/autotune"]
template = ["burn-wgpu?/template"]

ndarray = ["burn-ndarray"]
tch = ["burn-tch"]
candle = ["burn-candle"]
wgpu = ["burn-wgpu"]
cuda = ["burn-cuda"]

# Custom deserializer for Record that is helpful for importing data, such as PyTorch pt files.
record-item-custom-serde = ["thiserror", "regex"]
@@ -109,7 +104,6 @@ burn-derive = { path = "../burn-derive", version = "0.14.0" }
burn-tensor = { path = "../burn-tensor", version = "0.14.0", default-features = false }

# Backends
burn-cuda = { path = "../burn-cuda", version = "0.14.0", optional = true, default-features = false }
burn-ndarray = { path = "../burn-ndarray", version = "0.14.0", optional = true, default-features = false }
burn-wgpu = { path = "../burn-wgpu", version = "0.14.0", optional = true, default-features = false }
burn-autodiff = { path = "../burn-autodiff", version = "0.14.0", optional = true }
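The same cleanup happens in burn-core: the `burn-cuda` optional dependency and every `burn-cuda?/...` entry disappear from the feature lists. The `burn-wgpu?/fusion` entries that remain use Cargo's weak dependency feature syntax: the `?` enables `fusion` on `burn-wgpu` only when `burn-wgpu` is itself enabled, so `fusion` and `autotune` do not pull an optional backend in by themselves. On the Rust side, such an optional backend is typically re-exported behind a matching `cfg` gate; a minimal, hypothetical sketch (not the actual burn-core source, with `backend_impl` standing in for the optional crate and a `wgpu = []` feature assumed in the example's Cargo.toml):

// Hypothetical, self-contained sketch of the optional-backend pattern: the
// re-export only exists when the matching feature (and therefore the optional
// dependency) is enabled.
mod backend_impl {
    pub struct Wgpu;
}

#[cfg(feature = "wgpu")]
pub use backend_impl::Wgpu;

fn main() {
    // With `--features wgpu` the `Wgpu` name is available to downstream code;
    // without it, the re-export does not exist at all.
    #[cfg(feature = "wgpu")]
    let _backend = Wgpu;
}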
5 changes: 5 additions & 0 deletions crates/burn-cuda/README.md
@@ -0,0 +1,5 @@
# Burn-Cuda

This backend is still a work in progress and not ready to be used.

See #1525
2 changes: 1 addition & 1 deletion crates/burn-jit/src/compute/kernel.rs
@@ -55,7 +55,7 @@ pub struct CompiledKernel {
pub source: String,
/// Size of a workgroup for the compiled kernel
pub workgroup_size: WorkgroupSize,
/// TODO:
/// The number of bytes used by the shared memory
pub shared_mem_bytes: usize,
}
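The final hunk replaces a leftover `/// TODO:` with real documentation for `shared_mem_bytes`. A small, self-contained illustration of what that field records follows; the struct only mirrors the fields visible in the diff (the real `CompiledKernel` in burn-jit may carry more), and the sizes are made-up example values:

// Simplified mirror of the fields shown in the diff above; illustrative only.
struct WorkgroupSize {
    x: u32,
    y: u32,
    z: u32,
}

struct CompiledKernel {
    /// Source code of the compiled kernel.
    source: String,
    /// Size of a workgroup for the compiled kernel.
    workgroup_size: WorkgroupSize,
    /// The number of bytes of shared memory used by the kernel.
    shared_mem_bytes: usize,
}

fn main() {
    let kernel = CompiledKernel {
        source: String::from("/* generated kernel source */"),
        workgroup_size: WorkgroupSize { x: 16, y: 16, z: 1 },
        // e.g. one 16x16 tile of f32 values kept in shared (workgroup) memory
        shared_mem_bytes: 16 * 16 * std::mem::size_of::<f32>(),
    };
    println!(
        "workgroup {}x{}x{}, shared memory {} bytes, source {} bytes",
        kernel.workgroup_size.x,
        kernel.workgroup_size.y,
        kernel.workgroup_size.z,
        kernel.shared_mem_bytes,
        kernel.source.len()
    );
}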

