Skip to content

Commit bf51c44

Browse files
Fix some function names to CamelCase (#770)
* Fix some function names to CamelCase
* Fix typo in README
1 parent 69c4b65 commit bf51c44

8 files changed

+21
-21
lines changed

README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ See [this README](/tools) if you want to use Docker.
148148
## Classify an image
149149

150150
Once you have installed nGraph bridge, you can use TensorFlow to train a neural network or run inference using a trained model.
151-
The only change required to a scipt is adding
151+
The only change required to a script is adding
152152

153153
import ngraph_bridge
154154

ngraph_bridge/backend.cc

+2-2
Original file line numberDiff line numberDiff line change
@@ -39,12 +39,12 @@ Backend::Backend(const string& config) {
3939
m_device = config;
4040
}
4141

42-
shared_ptr<Executable> Backend::compile(shared_ptr<ngraph::Function> func,
42+
shared_ptr<Executable> Backend::Compile(shared_ptr<ngraph::Function> func,
4343
bool) {
4444
return make_shared<Executable>(func, m_device);
4545
}
4646

47-
bool Backend::is_supported(const Node& node) const {
47+
bool Backend::IsSupported(const Node& node) const {
4848
// TODO: check if the given backend/device supports the op. Right now we're
4949
// assuming
5050
// that the selected backend supports all opset5 ops

ngraph_bridge/backend.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,9 @@ class Backend {
3434
Backend(const string& configuration_string);
3535
~Backend() {}
3636

37-
shared_ptr<Executable> compile(shared_ptr<ngraph::Function> func,
37+
shared_ptr<Executable> Compile(shared_ptr<ngraph::Function> func,
3838
bool enable_performance_data = false);
39-
bool is_supported(const ngraph::Node& node) const;
39+
bool IsSupported(const ngraph::Node& node) const;
4040

4141
private:
4242
string m_device;

ngraph_bridge/executable.cc

+4-4
Original file line numberDiff line numberDiff line change
@@ -147,12 +147,12 @@ Executable::Executable(shared_ptr<Function> func, string device)
147147
m_infer_req = exe_network.CreateInferRequest();
148148
}
149149

150-
bool Executable::call(const vector<shared_ptr<runtime::Tensor>>& inputs,
150+
bool Executable::Call(const vector<shared_ptr<runtime::Tensor>>& inputs,
151151
vector<shared_ptr<runtime::Tensor>>& outputs) {
152152
if (m_trivial_fn) {
153153
NGRAPH_VLOG(2) << "Calling trivial IE function with inputs="
154154
<< inputs.size() << " outputs=" << outputs.size();
155-
return call_trivial(inputs, outputs);
155+
return CallTrivial(inputs, outputs);
156156
}
157157

158158
// Check if the number of inputs that the CNN network expects is equal to the
@@ -236,8 +236,8 @@ bool Executable::call(const vector<shared_ptr<runtime::Tensor>>& inputs,
236236
return true;
237237
}
238238

239-
bool Executable::call_trivial(const vector<shared_ptr<runtime::Tensor>>& inputs,
240-
vector<shared_ptr<runtime::Tensor>>& outputs) {
239+
bool Executable::CallTrivial(const vector<shared_ptr<runtime::Tensor>>& inputs,
240+
vector<shared_ptr<runtime::Tensor>>& outputs) {
241241
// outputs are in the same order as results
242242
auto results = m_trivial_fn->get_results();
243243
if (outputs.size() == 0 && results.size() > 0) {

ngraph_bridge/executable.h

+4-4
Original file line numberDiff line numberDiff line change
@@ -34,16 +34,16 @@ class Executable {
3434
public:
3535
Executable(shared_ptr<ngraph::Function> func, string device);
3636
~Executable() {}
37-
bool call(const vector<shared_ptr<ngraph::runtime::Tensor>>& inputs,
37+
bool Call(const vector<shared_ptr<ngraph::runtime::Tensor>>& inputs,
3838
vector<shared_ptr<ngraph::runtime::Tensor>>& outputs);
3939

40-
const ngraph::ResultVector& get_results() {
40+
const ngraph::ResultVector& GetResults() {
4141
return m_function->get_results();
4242
};
4343

4444
private:
45-
bool call_trivial(const vector<shared_ptr<ngraph::runtime::Tensor>>& inputs,
46-
vector<shared_ptr<ngraph::runtime::Tensor>>& outputs);
45+
bool CallTrivial(const vector<shared_ptr<ngraph::runtime::Tensor>>& inputs,
46+
vector<shared_ptr<ngraph::runtime::Tensor>>& outputs);
4747

4848
InferenceEngine::CNNNetwork m_network;
4949
InferenceEngine::InferRequest m_infer_req;

ngraph_bridge/kernels/ngraph_encapsulate_op.cc

+3-3
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ void NGraphEncapsulateOp::Compute(OpKernelContext* ctx) {
262262

263263
// Allocate tensors for the output results.
264264

265-
auto results = ng_exec->get_results();
265+
auto results = ng_exec->GetResults();
266266
std::vector<shared_ptr<ngraph::runtime::Tensor>> ng_outputs(results.size(),
267267
nullptr);
268268
std::vector<int> dyn_shape_tensors;
@@ -315,7 +315,7 @@ void NGraphEncapsulateOp::Compute(OpKernelContext* ctx) {
315315
<< "NGraphEncapsulateOp::Compute call starting for cluster "
316316
<< m_cluster_id;
317317
try {
318-
ng_exec->call(ng_inputs, ng_outputs);
318+
ng_exec->Call(ng_inputs, ng_outputs);
319319
} catch (const std::exception& exp) {
320320
string status_string = "Caught exception while executing cluster " +
321321
to_string(m_cluster_id) + ": " +
@@ -433,7 +433,7 @@ Status NGraphEncapsulateOp::GetExecutable(
433433

434434
NG_TRACE("Compile nGraph", m_name, "");
435435
try {
436-
ng_exec = backend->compile(ng_function);
436+
ng_exec = backend->Compile(ng_function);
437437
} catch (const std::exception& ex) {
438438
return errors::Internal("Failed to compile function " + m_name + ": ",
439439
ex.what());

ngraph_bridge/ngraph_mark_for_clustering.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ Status IsSupportedByBackend(
189189
// Loop through the ngraph op list to query
190190
for (auto it = ng_op->second.begin(); it != ng_op->second.end(); it++) {
191191
// Pass ngraph node to check if backend supports this op
192-
auto ret = op_backend->is_supported(**it);
192+
auto ret = op_backend->IsSupported(**it);
193193
if (!ret) {
194194
is_supported = false;
195195
return Status::OK();

test/test_ngraph_exec.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -185,9 +185,9 @@ TEST_F(NGraphExecTest, Axpy) {
185185
t_y->write(&v_x, sizeof(v_x));
186186

187187
// Execute the nGraph function.
188-
auto exec = backend->compile(ng_function);
188+
auto exec = backend->Compile(ng_function);
189189
vector<shared_ptr<ng::runtime::Tensor>> outputs;
190-
exec->call({t_x, t_y}, outputs);
190+
exec->Call({t_x, t_y}, outputs);
191191

192192
for (size_t i = 0; i < ng_function->get_output_size(); i++) {
193193
DumpNGTensor<float>(cout, ng_function->get_output_op(i)->get_name(),
@@ -240,9 +240,9 @@ TEST_F(NGraphExecTest, Axpy8bit) {
240240
t_y->write(&v_x, sizeof(v_x));
241241

242242
// Execute the nGraph function.
243-
auto exec = backend->compile(ng_function);
243+
auto exec = backend->Compile(ng_function);
244244
vector<shared_ptr<ng::runtime::Tensor>> outputs;
245-
exec->call({t_x, t_y}, outputs);
245+
exec->Call({t_x, t_y}, outputs);
246246

247247
for (size_t i = 0; i < ng_function->get_output_size(); i++) {
248248
DumpNGTensor<int8>(cout, ng_function->get_output_op(i)->get_name(),

0 commit comments

Comments (0)