Skip to content

Commit

Permalink
compatibility with torch v2 and mc/mcs bug fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
domkirke committed Nov 29, 2023
1 parent de5dc31 commit 5941793
Show file tree
Hide file tree
Showing 6 changed files with 45 additions and 11 deletions.
2 changes: 1 addition & 1 deletion src/backend/CMakeLists.txt
Expand Up @@ -6,7 +6,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")

add_library(backend STATIC parsing_utils.cpp backend.cpp)
target_link_libraries(backend "${TORCH_LIBRARIES}")
set_property(TARGET backend PROPERTY CXX_STANDARD 14)
set_property(TARGET backend PROPERTY CXX_STANDARD 17)

if(MSVC)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
Expand Down
20 changes: 18 additions & 2 deletions src/backend/backend.cpp
Expand Up @@ -18,6 +18,9 @@ void Backend::perform(std::vector<float *> in_buffer,
c10::InferenceMode guard;

auto params = get_method_params(method);
// std::cout << "in_buffer length : " << in_buffer.size() << std::endl;
// std::cout << "out_buffer length : " << out_buffer.size() << std::endl;

if (!params.size())
return;

Expand All @@ -31,13 +34,20 @@ void Backend::perform(std::vector<float *> in_buffer,

// COPY BUFFER INTO A TENSOR
std::vector<at::Tensor> tensor_in;
for (auto buf : in_buffer)
tensor_in.push_back(torch::from_blob(buf, {1, 1, n_vec}));
// for (auto buf : in_buffer)
for (int i(0); i < in_buffer.size(); i++) {
tensor_in.push_back(torch::from_blob(in_buffer[i], {1, 1, n_vec}));
// std::cout << i << " : " << tensor_in[i].min().item<float>() << std::endl;
}

auto cat_tensor_in = torch::cat(tensor_in, 1);
cat_tensor_in = cat_tensor_in.reshape({in_dim, n_batches, -1, in_ratio});
cat_tensor_in = cat_tensor_in.select(-1, -1);
cat_tensor_in = cat_tensor_in.permute({1, 0, 2});
// std::cout << cat_tensor_in.size(0) << ";" << cat_tensor_in.size(1) << ";" << cat_tensor_in.size(2) << std::endl;
// for (int i = 0; i < cat_tensor_in.size(1); i++ )
// std::cout << cat_tensor_in[0][i][0] << ";";
// std::cout << std::endl;

// SEND TENSOR TO DEVICE
std::unique_lock<std::mutex> model_lock(m_model_mutex);
Expand All @@ -59,6 +69,12 @@ void Backend::perform(std::vector<float *> in_buffer,
int out_batches(tensor_out.size(0)), out_channels(tensor_out.size(1)),
out_n_vec(tensor_out.size(2));

// for (int b(0); b < out_batches; b++) {
// for (int c(0); c < out_channels; c++) {
// std::cout << b << ";" << c << ";" << tensor_out[b][c].min().item<float>() << std::endl;
// }
// }

// CHECKS ON TENSOR SHAPE
if (out_batches * out_channels != out_buffer.size()) {
std::cout << "bad out_buffer size, expected " << out_batches * out_channels
Expand Down
23 changes: 20 additions & 3 deletions src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp
Expand Up @@ -175,6 +175,22 @@ void model_perform(mc_nn_tilde *mc_nn_instance) {
mc_nn_instance->m_method, mc_nn_instance->get_batches());
}

// Re-synchronise the perform-loop's cached raw-pointer views with the
// instance's current model buffers. If the channel count changed since the
// vectors were built (e.g. after a batch/dim update), rebuild them so the
// loop reads from and writes to the live buffers.
// NOTE(review): only a size mismatch triggers a rebuild — if the buffers were
// reallocated without changing count, stale pointers would remain; confirm
// callers always resize on reallocation.
void check_loop_buffers(mc_nn_tilde *mc_nn_instance, std::vector<float *> &in_model, std::vector<float *> &out_model) {
  if (in_model.size() != mc_nn_instance->m_in_model.size()) {
    in_model.clear();
    for (auto &buf : mc_nn_instance->m_in_model)
      in_model.push_back(buf.get());
  }
  if (out_model.size() != mc_nn_instance->m_out_model.size()) {
    out_model.clear();
    for (auto &buf : mc_nn_instance->m_out_model)
      out_model.push_back(buf.get());
  }
}

void model_perform_loop(mc_nn_tilde *mc_nn_instance) {
std::vector<float *> in_model, out_model;

Expand All @@ -185,6 +201,7 @@ void model_perform_loop(mc_nn_tilde *mc_nn_instance) {
out_model.push_back(ptr.get());

while (!mc_nn_instance->m_should_stop_perform_thread) {
check_loop_buffers(mc_nn_instance, in_model, out_model);
if (mc_nn_instance->m_data_available_lock.try_acquire_for(
std::chrono::milliseconds(200))) {
mc_nn_instance->m_model->perform(
Expand Down Expand Up @@ -275,7 +292,7 @@ mc_nn_tilde::mc_nn_tilde(const atoms &args)
// CREATE INLETS, OUTLETS and BUFFERS
m_in_buffer = std::make_unique<circular_buffer<double, float>[]>(
m_in_dim * get_batches());
for (int i(0); i < m_in_dim; i++) {
for (int i(0); i < m_in_dim * get_batches(); i++) {
std::string input_label = "";
try {
input_label = m_model->get_model()
Expand All @@ -294,7 +311,7 @@ mc_nn_tilde::mc_nn_tilde(const atoms &args)

m_out_buffer = std::make_unique<circular_buffer<float, double>[]>(
m_out_dim * get_batches());
for (int i(0); i < m_out_dim; i++) {
for (int i(0); i < m_out_dim * get_batches(); i++) {
std::string output_label = "";
try {
output_label = m_model->get_model()
Expand Down Expand Up @@ -404,7 +421,7 @@ void mc_nn_tilde::perform(audio_bundle input, audio_bundle output) {
if (m_in_buffer[0].full()) { // BUFFER IS FULL
if (!m_use_thread) {
// TRANSFER MEMORY BETWEEN INPUT CIRCULAR BUFFER AND MODEL BUFFER
for (int c(0); c < m_in_dim; c++)
for (int c(0); c < m_in_dim * get_batches(); c++)
m_in_buffer[c].get(m_in_model[c].get(), m_buffer_size);

// CALL MODEL PERFORM IN CURRENT THREAD
Expand Down
2 changes: 1 addition & 1 deletion src/frontend/maxmsp/mcs.nn_tilde/CMakeLists.txt
Expand Up @@ -44,7 +44,7 @@ add_library(
)
target_link_libraries(${PROJECT_NAME} PRIVATE backend)

set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14)
set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 17)


if (APPLE) # SEARCH FOR TORCH DYLIB IN THE LOADER FOLDER
Expand Down
7 changes: 4 additions & 3 deletions src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp
Expand Up @@ -386,13 +386,14 @@ void mc_bnn_tilde::perform(audio_bundle input, audio_bundle output) {
for (int d(0); d < m_in_dim; d++) {
auto in = input.samples(b * m_in_dim + d);
m_in_buffer[d * get_batches() + b].put(in, vec_size);
std::cout << "populate batch " << b << "; channel " << d << " into buffer" << d * get_batches() + b << "; value : " << in[0] << std::endl;
}
}

if (m_in_buffer[0].full()) { // BUFFER IS FULL
if (!m_use_thread) {
// TRANSFER MEMORY BETWEEN INPUT CIRCULAR BUFFER AND MODEL BUFFER
for (int c(0); c < m_in_dim; c++)
for (int c(0); c < m_in_dim * get_batches(); c++)
m_in_buffer[c].get(m_in_model[c].get(), m_buffer_size);

// CALL MODEL PERFORM IN CURRENT THREAD
Expand All @@ -404,11 +405,11 @@ void mc_bnn_tilde::perform(audio_bundle input, audio_bundle output) {

} else if (m_result_available_lock.try_acquire()) {
// TRANSFER MEMORY BETWEEN INPUT CIRCULAR BUFFER AND MODEL BUFFER
for (int c(0); c < m_in_dim; c++)
for (int c(0); c < m_in_dim * get_batches(); c++)
m_in_buffer[c].get(m_in_model[c].get(), m_buffer_size);

// TRANSFER MEMORY BETWEEN OUTPUT CIRCULAR BUFFER AND MODEL BUFFER
for (int c(0); c < m_out_dim; c++)
for (int c(0); c < m_out_dim * get_batches(); c++)
m_out_buffer[c].put(m_out_model[c].get(), m_buffer_size);

// SIGNAL PERFORM THREAD THAT DATA IS AVAILABLE
Expand Down
2 changes: 1 addition & 1 deletion src/frontend/puredata/nn_tilde/CMakeLists.txt
Expand Up @@ -70,4 +70,4 @@ target_link_libraries(nn PRIVATE backend)
if (MSVC)
target_link_libraries(nn PRIVATE "${PUREDATA_BIN_DIR}/pd.lib")
endif()
set_property(TARGET nn PROPERTY CXX_STANDARD 14)
set_property(TARGET nn PROPERTY CXX_STANDARD 17)

0 comments on commit 5941793

Please sign in to comment.