
Commit

style(pre-commit): autofix
pre-commit-ci[bot] committed Oct 15, 2023
1 parent 6417ba1 commit 2d995d0
Showing 27 changed files with 548 additions and 432 deletions.
2 changes: 1 addition & 1 deletion localization/nerf_based_localizer/README.md
@@ -13,7 +13,7 @@ NeRFBasedLocalizer is a vision-based localization package.
### Input

| Name | Type | Description |
-| :---------------| :---------------------------------------------- | :------------------------------- |
+| :-------------- | :---------------------------------------------- | :------------------------------- |
| `~/input/pose` | `geometry_msgs::msg::PoseWithCovarianceStamped` | EKF Pose without IMU correction. |
| `~/input/image` | `sensor_msgs::msg::Image` | Camera Image |

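For readers unfamiliar with the interface above, the sketch below shows how these two inputs could be consumed from an rclcpp node. Only the topic names and message types come from the table; the node name, QoS depth, and callback bodies are illustrative assumptions, not code from this package.

```cpp
#include <rclcpp/rclcpp.hpp>

#include <geometry_msgs/msg/pose_with_covariance_stamped.hpp>
#include <sensor_msgs/msg/image.hpp>

// Hypothetical listener for the two inputs listed in the README table.
class NerfLocalizerInputListener : public rclcpp::Node
{
public:
  NerfLocalizerInputListener() : Node("nerf_based_localizer")
  {
    pose_sub_ = create_subscription<geometry_msgs::msg::PoseWithCovarianceStamped>(
      "~/input/pose", 10,
      [this](geometry_msgs::msg::PoseWithCovarianceStamped::ConstSharedPtr msg) {
        RCLCPP_INFO(get_logger(), "EKF pose stamp: %d", msg->header.stamp.sec);
      });
    image_sub_ = create_subscription<sensor_msgs::msg::Image>(
      "~/input/image", 10, [this](sensor_msgs::msg::Image::ConstSharedPtr msg) {
        RCLCPP_INFO(get_logger(), "camera image %ux%u", msg->width, msg->height);
      });
  }

private:
  rclcpp::Subscription<geometry_msgs::msg::PoseWithCovarianceStamped>::SharedPtr pose_sub_;
  rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr image_sub_;
};
```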
137 changes: 62 additions & 75 deletions localization/nerf_based_localizer/doc_image/node_diagram.drawio.svg
(Diff for this SVG file is not rendered.)
69 changes: 35 additions & 34 deletions localization/nerf_based_localizer/src/nerf/CustomOps/CustomOps.cu
@@ -15,19 +15,21 @@
// Created by ppwang on 2023/3/17.
//

#include "CustomOps.hpp"
#include "../common.hpp"
#include "../common_cuda.hpp"
#include "CustomOps.hpp"

#define SCALE (16.f)

using Tensor = torch::Tensor;

-__global__ void WeightVarLossForwardKernel(int n_outs, float* weights, int* idx_start_end, float* out_vars) {
+__global__ void WeightVarLossForwardKernel(
+  int n_outs, float * weights, int * idx_start_end, float * out_vars)
+{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_outs) return;
int idx_start = idx_start_end[idx * 2];
-int idx_end = idx_start_end[idx * 2 + 1];
+int idx_end = idx_start_end[idx * 2 + 1];
if (idx_start >= idx_end) {
out_vars[idx] = 0.f;
return;
@@ -48,12 +50,13 @@ __global__ void WeightVarLossForwardKernel(int n_outs, float* weights, int* idx_
out_vars[idx] = variance;
}


-__global__ void WeightVarLossBackwardKernel(int n_outs, float* weights, int* idx_start_end, float* dl_dvars, float* dl_dw) {
+__global__ void WeightVarLossBackwardKernel(
+  int n_outs, float * weights, int * idx_start_end, float * dl_dvars, float * dl_dw)
+{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_outs) return;
int idx_start = idx_start_end[idx * 2];
-int idx_end = idx_start_end[idx * 2 + 1];
+int idx_end = idx_start_end[idx * 2 + 1];
if (idx_start >= idx_end) {
return;
}
@@ -79,53 +82,51 @@ __global__ void WeightVarLossBackwardKernel(int n_outs, float* weights, int* idx
}
}

-namespace torch::autograd {
+namespace torch::autograd
+{

-class WeightVarLoss : public Function<WeightVarLoss> {
+class WeightVarLoss : public Function<WeightVarLoss>
+{
public:
-static variable_list forward(AutogradContext *ctx,
-                             Tensor weights,
-                             Tensor idx_start_end) {
+static variable_list forward(AutogradContext * ctx, Tensor weights, Tensor idx_start_end)
+{
CHECK(weights.is_contiguous());
CHECK(idx_start_end.is_contiguous());
int n_outs = idx_start_end.size(0);
-Tensor out_vars = torch::empty({ n_outs }, CUDAFloat);
-dim3 grid_dim = LIN_GRID_DIM(n_outs);
+Tensor out_vars = torch::empty({n_outs}, CUDAFloat);
+dim3 grid_dim = LIN_GRID_DIM(n_outs);
dim3 block_dim = LIN_BLOCK_DIM;
-WeightVarLossForwardKernel<<<grid_dim, block_dim>>>(n_outs,
-    weights.data_ptr<float>(),
-    idx_start_end.data_ptr<int>(),
-    out_vars.data_ptr<float>());
-ctx->save_for_backward({ weights, idx_start_end });
-return { out_vars };
+WeightVarLossForwardKernel<<<grid_dim, block_dim>>>(
+  n_outs, weights.data_ptr<float>(), idx_start_end.data_ptr<int>(), out_vars.data_ptr<float>());
+ctx->save_for_backward({weights, idx_start_end});
+return {out_vars};
}

-static variable_list backward(AutogradContext *ctx,
-                              variable_list grad_output) {
+static variable_list backward(AutogradContext * ctx, variable_list grad_output)
+{
Tensor dl_dvar = grad_output[0].contiguous();
auto saved_tensors = ctx->get_saved_variables();
-Tensor& weights = saved_tensors[0];
-Tensor& idx_start_end = saved_tensors[1];
+Tensor & weights = saved_tensors[0];
+Tensor & idx_start_end = saved_tensors[1];

int n_outs = idx_start_end.size(0);
-int n_all = weights.size(0);
+int n_all = weights.size(0);

-Tensor dl_dw = torch::empty({ n_all }, CUDAFloat);
-dim3 grid_dim = LIN_GRID_DIM(n_outs);
+Tensor dl_dw = torch::empty({n_all}, CUDAFloat);
+dim3 grid_dim = LIN_GRID_DIM(n_outs);
dim3 block_dim = LIN_BLOCK_DIM;

-WeightVarLossBackwardKernel<<<grid_dim, block_dim>>>(n_outs,
-    weights.data_ptr<float>(),
-    idx_start_end.data_ptr<int>(),
-    dl_dvar.data_ptr<float>(),
-    dl_dw.data_ptr<float>());
+WeightVarLossBackwardKernel<<<grid_dim, block_dim>>>(
+  n_outs, weights.data_ptr<float>(), idx_start_end.data_ptr<int>(), dl_dvar.data_ptr<float>(),
+  dl_dw.data_ptr<float>());

-return { dl_dw, Tensor() };
+return {dl_dw, Tensor()};
}
};

-}
+}  // namespace torch::autograd

-Tensor CustomOps::WeightVar(Tensor weights, Tensor idx_start_end) {
+Tensor CustomOps::WeightVar(Tensor weights, Tensor idx_start_end)
+{
return torch::autograd::WeightVarLoss::apply(weights.contiguous(), idx_start_end.contiguous())[0];
}
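As a reading aid for the reformatted file above, here is a minimal sketch of how `CustomOps::WeightVar` might be driven from LibTorch C++ code. Only the function name and its `(weights, idx_start_end)` signature come from the diff; the tensor shapes, values, and the `main` harness are assumptions for illustration.

```cpp
#include <torch/torch.h>

#include <iostream>

#include "CustomOps.hpp"

int main()
{
  // Eight per-sample weights split into two segments, [0, 4) and [4, 8).
  // Values are made up; CUDA tensors are required because the op launches CUDA kernels.
  torch::Tensor weights =
    torch::rand({8}, torch::dtype(torch::kFloat32).device(torch::kCUDA)).requires_grad_(true);
  torch::Tensor idx_start_end =
    torch::tensor({0, 4, 4, 8}, torch::dtype(torch::kInt32).device(torch::kCUDA)).view({2, 2});

  // Forward pass: one variance value per segment (WeightVarLossForwardKernel).
  torch::Tensor vars = CustomOps::WeightVar(weights, idx_start_end);

  // Backward pass: gradients flow back onto every weight (WeightVarLossBackwardKernel).
  vars.sum().backward();
  std::cout << weights.grad() << std::endl;
  return 0;
}
```

Saving `weights` and `idx_start_end` in `forward` is what allows the backward kernel to redistribute the incoming per-segment gradient `dl_dvars` onto the individual weights.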
localization/nerf_based_localizer/src/nerf/CustomOps/CustomOps.hpp
@@ -15,8 +15,8 @@
// Created by ppwang on 2022/10/5.
//

-#ifndef NERF_BASED_LOCALIZER__CUSTOM_OPS_HPP_
-#define NERF_BASED_LOCALIZER__CUSTOM_OPS_HPP_
+#ifndef NERF__CUSTOMOPS__CUSTOMOPS_HPP_
+#define NERF__CUSTOMOPS__CUSTOMOPS_HPP_

#include <torch/torch.h>

@@ -40,4 +40,4 @@ torch::Tensor WeightVar(torch::Tensor weights, torch::Tensor idx_start_end);

}

-#endif // NERF_BASED_LOCALIZER__CUSTOM_OPS_HPP_
+#endif // NERF__CUSTOMOPS__CUSTOMOPS_HPP_
