comment out unused parameters
Summary: This uses `clang-tidy` to comment out unused parameters (in functions, methods, and lambdas) in fbcode. Cases that the tool failed to handle were fixed manually.

Reviewed By: igorsugak

Differential Revision: D5454343

fbshipit-source-id: 5dee339b4334e25e963891b519a5aa81fbf627b2
vgao1996 authored and facebook-github-bot committed Jul 21, 2017
1 parent 7df0d66 commit b410c51
Showing 63 changed files with 327 additions and 260 deletions.
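
For reference, the rewrite pattern that appears in every diff below is sketched here in minimal form; the class, function, and parameter names are hypothetical, not taken from the commit. The clang-tidy check involved is presumably misc-unused-parameters, whose fix-it comments out the name of any parameter the function body never reads. Because only the name disappears, the signature, default arguments, overrides, and all call sites compile unchanged.

#include <string>

// Hypothetical sketch, not code from this commit.
class ExampleOp {
 public:
  virtual ~ExampleOp() = default;

  // Before the rewrite: virtual bool Run(int stream_id = 0) { return true; }
  // The body never reads stream_id, so -Wunused-parameter flags it.
  // Commenting out the name keeps the type and the default argument while
  // silencing the warning:
  virtual bool Run(int /*stream_id*/ = 0) {
    return true;
  }
};

// The same rewrite applies to unused lambda parameters:
auto discard = [](const std::string& /*key*/, const std::string& /*value*/) {};
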
6 changes: 4 additions & 2 deletions caffe2/binaries/make_image_db.cc
@@ -37,8 +37,10 @@ CAFFE2_DEFINE_bool(warp, false, "If warp is set, warp the images to square.");
 namespace caffe2 {
 
 void ConvertImageDataset(
-    const string& input_folder, const string& list_filename,
-    const string& output_db_name, const bool shuffle) {
+    const string& input_folder,
+    const string& list_filename,
+    const string& output_db_name,
+    const bool /*shuffle*/) {
   std::ifstream list_file(list_filename);
   std::vector<std::pair<std::string, int> > lines;
   std::string filename;
2 changes: 1 addition & 1 deletion caffe2/contrib/nervana/nervana_math_gpu.cc
@@ -22,7 +22,7 @@ void Gemm<float, CUDAContext, NervanaEngine>(
     const float beta,
     float* C,
     CUDAContext* context,
-    TensorProto::DataType math_type) {
+    TensorProto::DataType /*math_type*/) {
   // Note that cublas follows fortran order, so the order is different from
   // the cblas convention.
   int lda = (TransA == CblasNoTrans) ? K : M;
6 changes: 3 additions & 3 deletions caffe2/contrib/prof/cuda_profile_ops.cc
@@ -56,7 +56,7 @@ class CudaProfileInitializeOp : public OperatorBase {
     unlink(config_.c_str());
   }
 
-  virtual bool Run(int /* unused */ stream_id = 0) {
+  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
     // If this fails, check the contents of "output" for hints.
     CUDA_CHECK(
         cudaProfilerInitialize(config_.c_str(), output_.c_str(), cudaCSV));
@@ -73,7 +73,7 @@ class CudaProfileStartOp : public OperatorBase {
   CudaProfileStartOp(const OperatorDef& operator_def, Workspace* ws)
       : OperatorBase(operator_def, ws) {}
 
-  virtual bool Run(int /* unused */ stream_id = 0) {
+  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
     CUDA_ENFORCE(cudaProfilerStart());
     return true;
   }
@@ -84,7 +84,7 @@ class CudaProfileStopOp : public OperatorBase {
   CudaProfileStopOp(const OperatorDef& operator_def, Workspace* ws)
       : OperatorBase(operator_def, ws) {}
 
-  virtual bool Run(int /* unused */ stream_id = 0) {
+  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
     CUDA_ENFORCE(cudaProfilerStop());
     return true;
   }
2 changes: 1 addition & 1 deletion caffe2/contrib/torch/torch_op.h
@@ -56,7 +56,7 @@ class Torch final {
     return Traits::tensorTy;
   }
 
-  void setContext(Context* context) {}
+  void setContext(Context* /*context*/) {}
 
   void setTensor(typename Traits::Tensor* t, Blob* blob) {
     CAFFE_ENFORCE_EQ(tensorTy(*blob), Traits::tensorTy);
14 changes: 7 additions & 7 deletions caffe2/contrib/transform/transform.h
@@ -63,9 +63,9 @@ class Transform {
    * Given the current subgraph (ordered), should we append the new node at idx?
    */
   virtual bool PatternRule(
-      const transform::Graph& g,
-      const std::vector<int>& subgraph,
-      int idx) {
+      const transform::Graph& /*g*/,
+      const std::vector<int>& /*subgraph*/,
+      int /*idx*/) {
     CAFFE_NOT_IMPLEMENTED;
   }
 
@@ -74,8 +74,8 @@
    * Given a subgraph, can we accept it?
    */
   virtual bool ValidatorRule(
-      const transform::Graph& g,
-      const std::vector<int>& subgraph) {
+      const transform::Graph& /*g*/,
+      const std::vector<int>& /*subgraph*/) {
     CAFFE_NOT_IMPLEMENTED;
   }
 
@@ -84,8 +84,8 @@
    * upon the subgraph.
    */
   virtual bool ReplaceRule(
-      const std::vector<int>& subgraph,
-      transform::Graph* g_ptr) {
+      const std::vector<int>& /*subgraph*/,
+      transform::Graph* /*g_ptr*/) {
     CAFFE_NOT_IMPLEMENTED;
   }
 
2 changes: 1 addition & 1 deletion caffe2/contrib/warpctc/ctc_op.cpp
@@ -6,7 +6,7 @@ namespace caffe2 {
 
 namespace detail {
 template <>
-ctcComputeInfo workspaceInfo<CPUContext>(const CPUContext& context) {
+ctcComputeInfo workspaceInfo<CPUContext>(const CPUContext& /*context*/) {
   ctcComputeInfo result;
   result.loc = CTC_CPU;
   result.num_threads = 1;
3 changes: 2 additions & 1 deletion caffe2/core/blob_serialization.cc
@@ -86,7 +86,8 @@ std::string Blob::Serialize(const string& name) const {
 // Specialization for StoreDeviceDetail for CPU - nothing needs to be done.
 template <>
 void TensorSerializer<CPUContext>::StoreDeviceDetail(
-    const Tensor<CPUContext>& input, TensorProto* proto) {}
+    const Tensor<CPUContext>& /*input*/,
+    TensorProto* /*proto*/) {}
 
 // The actual serialization registry objects.
 CAFFE_DEFINE_TYPED_REGISTRY(
7 changes: 5 additions & 2 deletions caffe2/core/blob_serialization.h
@@ -263,8 +263,11 @@ void TensorSerializer<Context>::SerializeWithChunkSize(
 
 template <class Context>
 void TensorSerializer<Context>::Serialize(
-    const Tensor<Context>& input, const string& name,
-    TensorProto* proto_ptr, size_t chunkBegin, int32_t chunkSize) {
+    const Tensor<Context>& input,
+    const string& /*name*/,
+    TensorProto* proto_ptr,
+    size_t chunkBegin,
+    int32_t chunkSize) {
   CAFFE_ENFORCE(
       chunkBegin <= input.size(),
       "Chunk begin is out of tensor: ",
2 changes: 1 addition & 1 deletion caffe2/core/blob_serializer_base.h
@@ -46,7 +46,7 @@ class BlobSerializerBase {
       const Blob& blob,
       const std::string& name,
       SerializationAcceptor acceptor,
-      int chunk_size) {
+      int /*chunk_size*/) {
     // Base implementation.
     Serialize(blob, name, acceptor);
   }
5 changes: 3 additions & 2 deletions caffe2/core/blob_test.cc
@@ -649,7 +649,7 @@ class VectorCursor : public db::Cursor {
     pos_ = 0;
   }
   ~VectorCursor() {}
-  void Seek(const string& key) override {}
+  void Seek(const string& /*key*/) override {}
   void SeekToFirst() override {}
   void Next() override {
     ++pos_;
@@ -790,7 +790,8 @@ TEST(CustomChunkSize, BigTensorSerialization) {
   tensor->mutable_data<float>();
   std::mutex mutex;
   int counter = 0;
-  auto acceptor = [&](const std::string& key, const std::string& value) {
+  auto acceptor = [&](const std::string& /*key*/,
+                      const std::string& /*value*/) {
     std::lock_guard<std::mutex> guard(mutex);
     counter++;
   };
2 changes: 1 addition & 1 deletion caffe2/core/common.h
@@ -201,7 +201,7 @@ class SkipIndices {
 template <>
 class SkipIndices<> {
  public:
-  static inline bool Contains(const int i) {
+  static inline bool Contains(const int /*i*/) {
     return false;
   }
 };
2 changes: 1 addition & 1 deletion caffe2/core/context.h
@@ -121,7 +121,7 @@ class CPUContext final {
 
   ~CPUContext() noexcept {}
 
-  inline void SwitchToDevice(int stream_id) {}
+  inline void SwitchToDevice(int /*stream_id*/) {}
   inline void SwitchToDevice() {
     SwitchToDevice(0);
   }
2 changes: 1 addition & 1 deletion caffe2/core/db.cc
@@ -28,7 +28,7 @@ class MiniDBCursor : public Cursor {
   }
   ~MiniDBCursor() {}
 
-  void Seek(const string& key) override {
+  void Seek(const string& /*key*/) override {
     LOG(FATAL) << "MiniDB does not support seeking to a specific key.";
   }
 
2 changes: 1 addition & 1 deletion caffe2/core/db.h
@@ -79,7 +79,7 @@ class Transaction {
  */
 class DB {
  public:
-  DB(const string& source, Mode mode) : mode_(mode) {}
+  DB(const string& /*source*/, Mode mode) : mode_(mode) {}
   virtual ~DB() { }
   /**
    * Closes the database.
2 changes: 1 addition & 1 deletion caffe2/core/logging.h
@@ -51,7 +51,7 @@ constexpr bool IsUsingGoogleLogging() {
  */
 void ShowLogInfoToStderr();
 
-inline void MakeStringInternal(std::stringstream& ss) {}
+inline void MakeStringInternal(std::stringstream& /*ss*/) {}
 
 template <typename T>
 inline void MakeStringInternal(std::stringstream& ss, const T& t) {
6 changes: 3 additions & 3 deletions caffe2/core/net.h
@@ -52,9 +52,9 @@ class NetBase {
    * operator.
    */
   virtual vector<float> TEST_Benchmark(
-      const int warmup_runs,
-      const int main_runs,
-      const bool run_individual) {
+      const int /*warmup_runs*/,
+      const int /*main_runs*/,
+      const bool /*run_individual*/) {
     LOG(ERROR) << "Benchmark not implemented for this net type.";
     return vector<float>();
   }
2 changes: 1 addition & 1 deletion caffe2/core/net_test.cc
@@ -23,7 +23,7 @@ class NetTestDummyOp final : public OperatorBase {
       : OperatorBase(operator_def, ws),
         fail_(OperatorBase::GetSingleArgument<bool>("fail", false)) {}
 
-  bool Run(int /* unused */ stream_id) override {
+  bool Run(int /* unused */ /*stream_id*/) override {
     if (fail_) {
       return false;
     }
4 changes: 2 additions & 2 deletions caffe2/core/operator.h
@@ -101,7 +101,7 @@ class OperatorBase {
   inline const vector<const Blob*>& Inputs() const { return inputs_; }
   inline const vector<Blob*>& Outputs() { return outputs_; }
 
-  virtual bool Run(int /* unused */ stream_id = 0) {
+  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
     CAFFE_NOT_IMPLEMENTED;
   }
 
@@ -413,7 +413,7 @@ struct DispatchHelper<FixedValues<FirstVal, Values...>, ExtraArgs...> {
 template <typename... ExtraArgs>
 struct DispatchHelper<FixedValues<>, ExtraArgs...> {
   template <typename Op>
-  static bool call(Op* op, TIndex size) {
+  static bool call(Op* op, TIndex /*size*/) {
     return op->template DoRunWithValue<ExtraArgs..., -1>();
   }
 };
10 changes: 5 additions & 5 deletions caffe2/core/operator_schema.cc
@@ -210,11 +210,11 @@ OpSchema& OpSchema::IdenticalTypeAndShapeOfInputDim(int idx, int dim) {
 
 OpSchema& OpSchema::ScalarType(::caffe2::TensorProto_DataType dt) {
   return TensorInferenceFunction(
-      [dt](const OperatorDef&, const vector<TensorShape>& input_types) {
-        vector<TensorShape> out(1);
-        out[0].set_data_type(dt);
-        return out;
-      });
+      [dt](const OperatorDef&, const vector<TensorShape>& /*input_types*/) {
+        vector<TensorShape> out(1);
+        out[0].set_data_type(dt);
+        return out;
+      });
 }
 
 OpSchema& OpSchema::CostInferenceFunction(CostInferenceFunctionType function) {
13 changes: 6 additions & 7 deletions caffe2/core/operator_schema_test.cc
@@ -212,13 +212,12 @@ TEST(OperatorSchemaTest, TestCastSchema) {
 OPERATOR_SCHEMA(OpSchemaCostInference)
     .NumInputs(2)
     .NumOutputs(2)
-    .CostInferenceFunction(
-        [](const OperatorDef& def, const vector<TensorShape>& inputs) {
-          struct OpSchema::Cost c;
-          c.flops =
-              2 * inputs[0].dims(0) * inputs[0].dims(1) * inputs[1].dims(1);
-          return c;
-        });
+    .CostInferenceFunction([](const OperatorDef& /*def*/,
+                              const vector<TensorShape>& inputs) {
+      struct OpSchema::Cost c;
+      c.flops = 2 * inputs[0].dims(0) * inputs[0].dims(1) * inputs[1].dims(1);
+      return c;
+    });
 
 TEST(OperatorSchemaTest, TestCostInference) {
   const OpSchema* schema = OpSchemaRegistry::Schema("OpSchemaCostInference");
8 changes: 4 additions & 4 deletions caffe2/core/operator_test.cc
@@ -12,7 +12,7 @@ namespace caffe2 {
 class JustTest : public OperatorBase {
  public:
   using OperatorBase::OperatorBase;
-  bool Run(int /* unused */ stream_id) override {
+  bool Run(int /* unused */ /*stream_id*/) override {
     return true;
   }
   virtual string type() {
@@ -26,7 +26,7 @@ class JustTestAndNeverConstructs : public JustTest {
       : JustTest(def, ws) {
     throw UnsupportedOperatorFeature("I just don't construct.");
   }
-  bool Run(int /* unused */ stream_id) override {
+  bool Run(int /* unused */ /*stream_id*/) override {
     return true;
   }
   string type() override {
@@ -37,7 +37,7 @@ class JustTestAndNeverConstructs : public JustTest {
 class JustTestAndDoesConstruct : public JustTest {
  public:
   using JustTest::JustTest;
-  bool Run(int /* unused */ stream_id) override {
+  bool Run(int /* unused */ /*stream_id*/) override {
     return true;
   }
   string type() override {
@@ -48,7 +48,7 @@ class JustTestAndDoesConstruct : public JustTest {
 class JustTestWithSomeOutput : public JustTest {
  public:
   using JustTest::JustTest;
-  bool Run(int /* unused */ stream_id) override {
+  bool Run(int /* unused */ /*stream_id*/) override {
     *OperatorBase::Output<int>(0) = 5;
     return true;
   }
4 changes: 2 additions & 2 deletions caffe2/core/plan_executor.cc
@@ -72,7 +72,7 @@ struct Reporter {
 // Returns a function that returns `true` if we should continue
 // iterating, given the current iteration count.
 std::function<bool(int64_t)> getContinuationTest(
-    Workspace* ws,
+    Workspace* /*ws*/,
     const ExecutionStep& step) {
   if (step.has_should_stop_blob()) {
     CAFFE_ENFORCE(
@@ -93,7 +93,7 @@ std::function<bool(int64_t)> getContinuationTest(
     if (onlyOnce) {
       return [](int64_t i) { return i == 0; };
     } else {
-      return [](int64_t i) { return true; };
+      return [](int64_t /*i*/) { return true; };
     }
   }
 };
3 changes: 2 additions & 1 deletion caffe2/core/typeid.h
@@ -221,7 +221,8 @@ class TypeMeta {
    * A placeholder function for types that do not allow assignment.
    */
   template <typename T>
-  static void _CopyNotAllowed(const void* src, void* dst, size_t n) {
+  static void
+  _CopyNotAllowed(const void* /*src*/, void* /*dst*/, size_t /*n*/) {
     std::cerr << "Type " << Name<T>() << " does not allow assignment.";
     // This is an error by design, so we will quit loudly.
     abort();
2 changes: 1 addition & 1 deletion caffe2/core/workspace.h
@@ -37,7 +37,7 @@ struct StopOnSignal {
 
   StopOnSignal(const StopOnSignal& other) : handler_(other.handler_) {}
 
-  bool operator()(int iter) {
+  bool operator()(int /*iter*/) {
     return handler_->CheckForSignals() != SignalHandler::Action::STOP;
   }
 
4 changes: 3 additions & 1 deletion caffe2/cuda_rtc/elemenntwise_rtc_gpu.cc
@@ -11,7 +11,9 @@ class ElementwiseRTCFunction
   ElementwiseRTCFunction() : CudaRTCFunction(), name_(GetUniqueName()) {}
 
   template <typename... Args>
-  string KernelName(Args... args) { return name_; }
+  string KernelName(Args... /*args*/) {
+    return name_;
+  }
 
   template <typename... Args>
   string GetSource(Args... args);
8 changes: 6 additions & 2 deletions caffe2/cuda_rtc/pool_op_rtc_gpu.cc
@@ -104,7 +104,9 @@ class MaxPoolRTCFunction : public CudaRTCFunction<MaxPoolRTCFunction> {
   MaxPoolRTCFunction() : CudaRTCFunction(), name_(GetUniqueName()) {}
 
   template <typename... Args>
-  string KernelName(Args... args) { return name_; }
+  string KernelName(Args... /*args*/) {
+    return name_;
+  }
 
   template <typename... Args>
   string GetSource(Args... args);
@@ -119,7 +121,9 @@ class MaxPoolGradientRTCFunction
   MaxPoolGradientRTCFunction() : CudaRTCFunction(), name_(GetUniqueName()) {}
 
   template <typename... Args>
-  string KernelName(Args... args) { return name_; }
+  string KernelName(Args... /*args*/) {
+    return name_;
+  }
 
   template <typename... Args>
   string GetSource(Args... args);
2 changes: 1 addition & 1 deletion caffe2/db/protodb.cc
@@ -13,7 +13,7 @@ class ProtoDBCursor : public Cursor {
       : proto_(proto), iter_(0) {}
   ~ProtoDBCursor() {}
 
-  void Seek(const string& str) override {
+  void Seek(const string& /*str*/) override {
     CAFFE_THROW("ProtoDB is not designed to support seeking.");
   }
 
