Back out "Revert D10123245: Back out "codemod cuda_gpu_id to device_id"" (#12232)

Summary:
Pull Request resolved: #12232

Original commit changeset: fca91fea58b7

This adds the proper modifications to the DeviceType <-> DeviceOption conversion code added in D10033396.

Reviewed By: jerryzh168

Differential Revision: D10132473

fbshipit-source-id: 801ef777e2950982cb47b48051b1471a0a91e64b
bddppq authored and facebook-github-bot committed Oct 2, 2018
1 parent 696498d commit ff608a9
Showing 41 changed files with 163 additions and 121 deletions.
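The net effect of this commit is that the CUDA device index in caffe2's DeviceOption is exposed as cuda_gpu_id again (field tag 2 in caffe2.proto), and CUDAContext and the net/event code follow the same naming. A minimal sketch of how a caller addresses a GPU after this change (illustrative only; the device index 1 is an arbitrary example):

    from caffe2.proto import caffe2_pb2

    # Target the second CUDA device via the restored field name.
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CUDA
    device_option.cuda_gpu_id = 1  # was device_option.device_id before this commit

On the C++ side the generated accessors are cuda_gpu_id(), set_cuda_gpu_id(), and has_cuda_gpu_id(), and CUDAContext exposes the index through cuda_gpu_id(), as the hunks below show.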
4 changes: 2 additions & 2 deletions caffe2/contrib/nccl/cuda_nccl_op_gpu.cc
@@ -11,7 +11,7 @@ nccl::NCCLExecution getNCCLElements(
// We either do an N-N op, or an N-1 op.
CAFFE_ENFORCE(op->InputSize() == op->OutputSize() || op->OutputSize() == 1);
nccl::NCCLExecution ex;
-ex.stream_gpu_id = context.device_id();
+ex.stream_gpu_id = context.cuda_gpu_id();
ex.stream = context.cuda_stream();
ex.root = op->template GetSingleArgument<int>("root", 0);
ex.elements.resize(op->InputSize());
@@ -204,7 +204,7 @@ std::pair<std::vector<DeviceOption>, std::vector<DeviceOption>> ncclOpDevInfer(
for (int i = 0; i < def.input().size(); ++i) {
DeviceOption dev;
dev.set_device_type(1);
-dev.set_device_id(i);
+dev.set_cuda_gpu_id(i);
opt.push_back(dev);
}
return std::make_pair(opt, opt);
2 changes: 1 addition & 1 deletion caffe2/contrib/nccl/nccl_ops_test.py
@@ -21,7 +21,7 @@
def gpu_device(i):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
-device_option.device_id = i
+device_option.cuda_gpu_id = i
return device_option


4 changes: 2 additions & 2 deletions caffe2/contrib/prof/prof_dag_net.cc
@@ -33,9 +33,9 @@ void ProfDAGNet::ValidateOpTensorDevices() {
had_mismatches = true;
LOG(INFO) << "== PERFORMANCE WARNING == \n"
<< " Operator " << node.operator_->debug_def().type()
-<< " expects GPU " << mismatch.second.first.device_id()
+<< " expects GPU " << mismatch.second.first.cuda_gpu_id()
<< " but tensor [" << mismatch.first << "] is on GPU "
-<< mismatch.second.second.device_id();
+<< mismatch.second.second.cuda_gpu_id();
}
}
if (!had_mismatches) {
2 changes: 1 addition & 1 deletion caffe2/contrib/tensorboard/tensorboard_exporter.py
@@ -177,7 +177,7 @@ def _tf_device(device_option):
if device_option.device_type == caffe2_pb2.CPU:
return "/cpu:*"
if device_option.device_type == caffe2_pb2.CUDA:
-return "/gpu:{}".format(device_option.device_id)
+return "/gpu:{}".format(device_option.cuda_gpu_id)
raise Exception("Unhandled device", device_option)


8 changes: 4 additions & 4 deletions caffe2/contrib/warpctc/ctc_ops_test.py
@@ -79,11 +79,11 @@ def test_ctc_cost_cpu(self):
def test_ctc_cost_gpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
-device_id=0),
+cuda_gpu_id=0),
is_test=False)
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
-device_id=0),
+cuda_gpu_id=0),
is_test=False,
skip_input_lengths=True)

@@ -99,10 +99,10 @@ def test_ctc_forward_only_cpu(self):
def test_ctc_forward_only_gpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
-device_id=0),
+cuda_gpu_id=0),
is_test=True)
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
-device_id=0),
+cuda_gpu_id=0),
is_test=True,
skip_input_lengths=True)
4 changes: 2 additions & 2 deletions caffe2/core/blob_gpu_test.cc
@@ -195,7 +195,7 @@ TEST(TensorTest, TensorSerializationMultiDevices) {
}
EXPECT_TRUE(tensor_proto.has_device_detail());
EXPECT_EQ(tensor_proto.device_detail().device_type(), PROTO_CUDA);
-EXPECT_EQ(tensor_proto.device_detail().device_id(), gpu_id);
+EXPECT_EQ(tensor_proto.device_detail().cuda_gpu_id(), gpu_id);
// Test if the restored blob is still of the same device.
blob.Reset();
EXPECT_NO_THROW(DeserializeBlob(serialized, &blob));
@@ -205,7 +205,7 @@ TEST(TensorTest, TensorSerializationMultiDevices) {
// Test if we force the restored blob on a different device, we
// can still get so.
blob.Reset();
-proto.mutable_tensor()->mutable_device_detail()->set_device_id(0);
+proto.mutable_tensor()->mutable_device_detail()->set_cuda_gpu_id(0);
EXPECT_NO_THROW(DeserializeBlob(proto.SerializeAsString(), &blob));
EXPECT_TRUE(BlobIsTensorType(blob, CUDA));
EXPECT_EQ(GetGPUIDForPointer(blob.Get<TensorCUDA>().data<float>()), 0);
2 changes: 1 addition & 1 deletion caffe2/core/context_gpu.cu
@@ -256,7 +256,7 @@ CUDAContext::CUDAContext(const int gpu_id)

CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(
-option.has_device_id() ? RectifyGPUID(option.device_id())
+option.has_cuda_gpu_id() ? RectifyGPUID(option.cuda_gpu_id())
: CaffeCudaGetDevice()),
random_seed_(
option.has_random_seed() ? option.random_seed()
6 changes: 3 additions & 3 deletions caffe2/core/context_gpu.h
@@ -184,7 +184,7 @@ class CAFFE2_CUDA_API CUDAContext final : public BaseContext {
}
}

-inline int device_id() const {
+inline int cuda_gpu_id() const {
return gpu_id_;
}

@@ -283,7 +283,7 @@ class CAFFE2_CUDA_API CUDAContext final : public BaseContext {
}

static bool IsStreamFree(const DeviceOption& option, int stream_id) {
-auto stream = CUDAContext::cuda_stream(option.device_id(), stream_id);
+auto stream = CUDAContext::cuda_stream(option.cuda_gpu_id(), stream_id);
return cudaStreamQuery(stream) == cudaSuccess;
}

@@ -393,7 +393,7 @@ class CAFFE2_CUDA_API CUDAStaticContext final : public BaseStaticContext {

void ExtractDeviceOption(DeviceOption* device, const void* data) override {
device->set_device_type(TypeToProto(GetDeviceType()));
-device->set_device_id(GetGPUIDForPointer(data));
+device->set_cuda_gpu_id(GetGPUIDForPointer(data));
}

protected:
6 changes: 3 additions & 3 deletions caffe2/core/cudnn_wrappers.h
@@ -122,17 +122,17 @@ class CuDNNWrapper {
void with_cudnn_state(size_t state_idx, F&& f) {
CAFFE_ENFORCE(
state_idx < CAFFE2_COMPILE_TIME_MAX_CUDNN_STATES, "Invalid state_idx");
-auto& sync_state = cudnn_states()[context_->device_id()][state_idx];
+auto& sync_state = cudnn_states()[context_->cuda_gpu_id()][state_idx];

-DeviceGuard dg(context_->device_id());
+DeviceGuard dg(context_->cuda_gpu_id());

// We need to serialize execution on the CuDNNState as we can't
// allow multiple threads to race through the cudaEventRecord
// calls (so a worker thread might wait on another worker thread's
// execution)
std::lock_guard<std::mutex> g(sync_state.mutex);
if (!sync_state.state.get()) {
-sync_state.state.reset(new CuDNNState(context_->device_id()));
+sync_state.state.reset(new CuDNNState(context_->cuda_gpu_id()));
}
CHECK_NOTNULL(sync_state.state.get())->execute(context_->cuda_stream(), f);
}
16 changes: 8 additions & 8 deletions caffe2/core/event_gpu.cc
@@ -9,21 +9,21 @@ namespace caffe2 {
struct CudaEventWrapper {
explicit CudaEventWrapper(const DeviceOption& option)
: cuda_stream_(nullptr),
-device_id_(option.device_id()),
+cuda_gpu_id_(option.cuda_gpu_id()),
status_(EventStatus::EVENT_INITIALIZED) {
CAFFE_ENFORCE(option.device_type(), PROTO_CUDA);
-DeviceGuard g(device_id_);
+DeviceGuard g(cuda_gpu_id_);
CUDA_ENFORCE(cudaEventCreate(
&cuda_event_, cudaEventDefault | cudaEventDisableTiming));
}
~CudaEventWrapper() {
-DeviceGuard g(device_id_);
+DeviceGuard g(cuda_gpu_id_);
CUDA_CHECK(cudaEventDestroy(cuda_event_));
}

cudaEvent_t cuda_event_;
cudaStream_t cuda_stream_;
-int device_id_;
+int cuda_gpu_id_;

std::atomic<int> status_;
std::mutex mutex_recorded_;
@@ -65,12 +65,12 @@ void EventRecordCUDA(Event* event, const void* context, const char* err_msg) {
const auto& current_device = CaffeCudaGetDevice();
CAFFE_ENFORCE_EQ(
current_device,
-wrapper->device_id_,
+wrapper->cuda_gpu_id_,
"When you call EventRecordCUDA, your current device should be the same "
"as the device specified by the event.");
CAFFE_ENFORCE_EQ(
current_device,
-static_cast<const CUDAContext*>(context)->device_id());
+static_cast<const CUDAContext*>(context)->cuda_gpu_id());
CUDA_ENFORCE(cudaEventRecord(
wrapper->cuda_event_,
static_cast<const CUDAContext*>(context)->cuda_stream()));
@@ -96,7 +96,7 @@ void EventFinishCUDA(const Event* event) {

if (wrapper->status_ == EventStatus::EVENT_SCHEDULED) {
// ok, even if event is already completed and status was not yet updated
-DeviceGuard g(wrapper->device_id_);
+DeviceGuard g(wrapper->cuda_gpu_id_);
auto cudaResult = cudaEventSynchronize(wrapper->cuda_event_);
if (cudaResult == cudaSuccess) {
wrapper->status_ = EventStatus::EVENT_SUCCESS;
@@ -127,7 +127,7 @@ void EventWaitCUDACUDA(const Event* event, void* context) {
if (context_stream != event_stream) {
// CAFFE_ENFORCE_EQ(
// CaffeCudaGetDevice(),
-// static_cast<const CUDAContext*>(context)->device_id());
+// static_cast<const CUDAContext*>(context)->cuda_gpu_id());
CUDA_CHECK(cudaStreamWaitEvent(context_stream, wrapper->cuda_event_, 0));
}
}
2 changes: 1 addition & 1 deletion caffe2/core/hip/event_hip.cc
@@ -138,7 +138,7 @@ void EventWaitHIPHIP(const Event* event, void* context)
{
// CAFFE_ENFORCE_EQ(
// CaffeCudaGetDevice(),
-// static_cast<const CUDAContext*>(context)->device_id());
+// static_cast<const CUDAContext*>(context)->cuda_gpu_id());
HIP_CHECK(hipStreamWaitEvent(context_stream, wrapper->hip_event_, 0));
}
}
4 changes: 2 additions & 2 deletions caffe2/core/memonger.cc
@@ -176,7 +176,7 @@ class ComputeBlobRecyclingForDag {
// cuda device option but whose inputs/outputs are on CPU
if (net.op(op_index).type() == "CopyGPUToCPU") {
blob_device_[output].set_device_type(0);
-blob_device_[output].set_device_id(0);
+blob_device_[output].set_cuda_gpu_id(0);
}
}
}
@@ -478,7 +478,7 @@ class ComputeBlobRecyclingForDag {
const DeviceOption& device_option) {
const DeviceOption& blob_device = blob_device_[blob_name];
if (device_option.device_type() != blob_device.device_type() ||
-device_option.device_id() != blob_device.device_id()) {
+device_option.cuda_gpu_id() != blob_device.cuda_gpu_id()) {
return false;
}
for (const int token : req_tokens_[blob_name]) {
4 changes: 2 additions & 2 deletions caffe2/core/net_async_base.cc
@@ -157,7 +157,7 @@ TaskThreadPool* AsyncNetBase::pool(const DeviceOption& device_option) {
numa_node_id);
return poolGetter(cpu_pools_, PROTO_CPU, numa_node_id, num_workers_);
} else if (device_option.device_type() == PROTO_CUDA) {
-auto gpu_id = device_option.device_id();
+auto gpu_id = device_option.cuda_gpu_id();
CAFFE_ENFORCE(
gpu_id >= 0 && gpu_id < FLAGS_caffe2_net_async_max_gpus,
"Invalid GPU id: " + caffe2::to_string(gpu_id));
@@ -173,7 +173,7 @@ int AsyncNetBase::stream(int task_id) {
const auto& device_option = event(task_id).GetDeviceOption();
int stream_id = 0;
if (device_option.device_type() == PROTO_CUDA) {
-int gpu_id = device_option.device_id();
+int gpu_id = device_option.cuda_gpu_id();
CAFFE_ENFORCE_GE(gpu_id, 0, "Invalid gpu id: " + caffe2::to_string(gpu_id));
if ((unsigned)gpu_id >= getStreamCounters().size()) {
getStreamCounters().resize(gpu_id + 1, 0);
2 changes: 1 addition & 1 deletion caffe2/core/net_async_dag_gpu.cc
@@ -112,7 +112,7 @@ AsyncDAGNet::AsyncDAGNet(
int AsyncDAGNet::stream(const DeviceOption& device_option) {
int stream_id = 0;
if (device_option.device_type() == PROTO_CUDA) {
-int gpu_id = device_option.device_id();
+int gpu_id = device_option.cuda_gpu_id();
CAFFE_ENFORCE_GE(gpu_id, 0, "Invalid gpu id: " + caffe2::to_string(gpu_id));
if ((unsigned)gpu_id >= stream_counters_.size()) {
stream_counters_.resize(gpu_id + 1, 0);
2 changes: 1 addition & 1 deletion caffe2/core/net_gpu_test.cc
@@ -124,7 +124,7 @@ TEST(NetTest, DISABLED_ChainingForDifferentDevices) {
type: "NetTestDummy"
device_option {
device_type: 1
-device_id: 1
+cuda_gpu_id: 1
}
}
)DOC";
2 changes: 1 addition & 1 deletion caffe2/core/operator.cc
@@ -649,7 +649,7 @@ std::map<string, std::pair<DeviceOption, DeviceOption>> ValidateTensorDevices(
&blob_device);

if (blob_device.device_type() == PROTO_CUDA &&
-blob_device.device_id() != op_device.device_id()) {
+blob_device.cuda_gpu_id() != op_device.cuda_gpu_id()) {
mismatches[blob_name] = std::make_pair(op_device, blob_device);
} else if (
blob_device.device_type() == PROTO_HIP &&
2 changes: 1 addition & 1 deletion caffe2/mkl/utils/mkl_memory.cc
@@ -26,7 +26,7 @@ static vector<int64_t> GetMKLTensorInfo(
const mkl::MKLMemory<T>* tc = static_cast<const mkl::MKLMemory<T>*>(c);
*capacity = tc->size() * sizeof(T);
device->set_device_type(PROTO_MKLDNN);
-device->set_device_id(0);
+device->set_cuda_gpu_id(0);
return tc->dims();
}

4 changes: 2 additions & 2 deletions caffe2/observers/profile_observer_gpu.cc
@@ -70,7 +70,7 @@ void ProfileOperatorObserver::Start() {
int device;
cudaGetDevice(&device);

-cudaSetDevice(context->device_id());
+cudaSetDevice(context->cuda_gpu_id());
cudaEventCreate(&start_);
cudaEventRecord(start_, context->cuda_stream());

@@ -92,7 +92,7 @@ void ProfileOperatorObserver::Stop() {
int device;
cudaGetDevice(&device);

-cudaSetDevice(context->device_id());
+cudaSetDevice(context->cuda_gpu_id());
cudaEventCreate(&stop_);
cudaEventRecord(stop_, context->cuda_stream());
cudaEventSynchronize(stop_);
2 changes: 1 addition & 1 deletion caffe2/onnx/backend.cc
@@ -65,7 +65,7 @@ caffe2::DeviceOption GetDeviceOption(const Device& onnx_device) {
{DeviceType::CUDA, caffe2::DeviceType::CUDA}};
caffe2::DeviceOption d;
d.set_device_type(static_cast<int32_t>(m.at(onnx_device.type)));
-d.set_device_id(onnx_device.device_id);
+d.set_cuda_gpu_id(onnx_device.device_id);
return d;
}

2 changes: 1 addition & 1 deletion caffe2/operators/load_save_op_gpu.cc
@@ -9,7 +9,7 @@ void LoadOp<CUDAContext>::SetCurrentDevice(BlobProto* proto) {
proto->mutable_tensor()->clear_device_detail();
auto* device_detail = proto->mutable_tensor()->mutable_device_detail();
device_detail->set_device_type(PROTO_CUDA);
-device_detail->set_device_id(CaffeCudaGetDevice());
+device_detail->set_cuda_gpu_id(CaffeCudaGetDevice());
}
}

4 changes: 2 additions & 2 deletions caffe2/operators/rnn/recurrent_network_executor_gpu.cc
@@ -72,11 +72,11 @@ void CUDARecurrentNetworkExecutor::_ExecRange(int from, int to) {
if (gpu_id == -1 &&
rnn_op.op->device_option().device_type() ==
DeviceTypeProto::PROTO_CUDA) {
-gpu_id = rnn_op.op->device_option().device_id();
+gpu_id = rnn_op.op->device_option().cuda_gpu_id();
} else {
CAFFE_ENFORCE(
rnn_op.op->device_option().device_type() == 0 ||
-rnn_op.op->device_option().device_id() == gpu_id,
+rnn_op.op->device_option().cuda_gpu_id() == gpu_id,
"RNN Executor only supports ops on one GPU");
}

2 changes: 1 addition & 1 deletion caffe2/proto/caffe2.proto
@@ -183,7 +183,7 @@ message DeviceOption {
// optional DeviceType device_type = 1 [ default = CPU ];
optional int32 device_type = 1 [ default = 0 ]; // 0 is CPU.
// [CUDA specific] the cuda gpu id.
-optional int32 device_id = 2;
+optional int32 cuda_gpu_id = 2;
// [general] The random seed to start the device random number generator with.
optional uint32 random_seed = 3;
// [general] What node this op should execute on.
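A note on the caffe2.proto hunk above: since only the field name changes and the tag number stays 2, binary-serialized DeviceOption messages remain wire-compatible across the rename; text-format protos (such as the NetDef in net_gpu_test.cc above) and the generated accessors do have to use the restored name. A small illustrative sketch of the round-trip:

    from caffe2.proto import caffe2_pb2

    # Binary serialization keys fields by tag number, not name, so messages
    # written before the rename still parse; only the accessor name differs.
    opt = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA, cuda_gpu_id=3)
    restored = caffe2_pb2.DeviceOption()
    restored.ParseFromString(opt.SerializeToString())
    assert restored.cuda_gpu_id == 3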
