diff --git a/lite/examples/sound_classification/android_legacy/README.md b/lite/examples/sound_classification/android_legacy/README.md new file mode 100644 index 00000000000..f7db9bf9bff --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/README.md @@ -0,0 +1,49 @@ +# Sound Classifier Android sample. + +## Important notice + +* See this sample for how to integrate a sound classification model into an + Android app using + [TFLite Task Library](https://github.com/tensorflow/examples/tree/master/lite/examples/sound_classification/android) +* This copy of the Android sound classification sample is to demonstrate how + to integrate sound classification models trained on Teachable Machine. +* This sample is only for compatibility purposes and will be removed in the near + future when Teachable Machine exports models that are compatible with Task + Library. + +## Requirements + +* Android Studio 4.1 (installed on a Linux, Mac or Windows machine) +* An Android device, or an Android Emulator + +## Build and run + +### Step 1. Clone the TensorFlow examples source code + +Clone the TensorFlow examples GitHub repository to your computer to get the demo +application. + +``` +git clone https://github.com/tensorflow/examples +``` + +### Step 2. Import the sample app to Android Studio + +Open the TensorFlow source code in Android Studio. To do this, open Android +Studio and select `Import Projects (Gradle, Eclipse ADT, etc.)`, setting the +folder to `examples/lite/examples/sound_classification/android_legacy` + +### Step 3. Run the Android app + +Connect the Android device to the computer and be sure to approve any ADB +permission prompts that appear on your phone. Select `Run -> Run app.` Select +your device as the deployment target from the list of connected devices. This +will install the app on the device. + +To test the app, open the app called `TFL Sound Classifier` on your device. 
+Re-installing the app may require you to uninstall the previous installations. + +## Resources used: + +* [TensorFlow Lite](https://www.tensorflow.org/lite) +* [Teachable Machine Audio Project](https://teachablemachine.withgoogle.com/train/audio) diff --git a/lite/examples/sound_classification/android_legacy/app/build.gradle b/lite/examples/sound_classification/android_legacy/app/build.gradle new file mode 100644 index 00000000000..77b18df187d --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/build.gradle @@ -0,0 +1,66 @@ +apply plugin: 'com.android.application' +apply plugin: 'kotlin-android' +apply plugin: 'de.undercouch.download' + +android { + compileSdkVersion 30 + defaultConfig { + applicationId "org.tensorflow.lite.examples.soundclassifier" + minSdkVersion 23 + targetSdkVersion 30 + versionCode 1 + versionName "1.0" + testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner" + } + + aaptOptions { + noCompress "tflite" + } + + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro' + } + } + + buildFeatures { + viewBinding true + } + + compileOptions { + sourceCompatibility 1.8 + targetCompatibility 1.8 + } + + kotlinOptions { + jvmTarget = "1.8" + } +} + +// import DownloadModels task +project.ext.ASSET_DIR = projectDir.toString() + '/src/main/assets' + +// Download default models; if you wish to use your own models then +// place them in the "assets" directory and comment out below line. 
+apply from: 'download_model.gradle' + +dependencies { + implementation fileTree(dir: "libs", include: ["*.jar"]) + implementation "org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version" + implementation "androidx.core:core-ktx:1.3.1" + implementation "androidx.appcompat:appcompat:1.2.0" + implementation "androidx.lifecycle:lifecycle-common-java8:2.2.0" + implementation "androidx.constraintlayout:constraintlayout:2.0.1" + implementation "androidx.recyclerview:recyclerview:1.1.0" + implementation "com.google.android.material:material:1.2.1" + + implementation "org.tensorflow:tensorflow-lite:2.3.0" + implementation "org.tensorflow:tensorflow-lite-select-tf-ops:2.3.0" + implementation "org.tensorflow:tensorflow-lite-support:0.1.0" + implementation "org.tensorflow:tensorflow-lite-metadata:0.1.0" + + testImplementation "junit:junit:4.13" + androidTestImplementation "androidx.test.ext:junit:1.1.2" + androidTestImplementation "androidx.test.espresso:espresso-core:3.3.0" +} \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/app/download_model.gradle b/lite/examples/sound_classification/android_legacy/app/download_model.gradle new file mode 100644 index 00000000000..31211010cf5 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/download_model.gradle @@ -0,0 +1,11 @@ +task downloadSoundClassificationModelFile(type: Download) { + src 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/sound_classification/snap_clap.tflite' + dest project.ext.ASSET_DIR + '/sound_classifier.tflite' + overwrite false +} + +tasks.whenTaskAdded { task -> + if ((task.name == 'assembleDebug') || (task.name == 'assembleRelease')) { + task.dependsOn 'downloadSoundClassificationModelFile' + } +} \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/app/proguard-rules.pro b/lite/examples/sound_classification/android_legacy/app/proguard-rules.pro new file mode 100644 index 
00000000000..481bb434814 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/proguard-rules.pro @@ -0,0 +1,21 @@ +# Add project specific ProGuard rules here. +# You can control the set of applied configuration files using the +# proguardFiles setting in build.gradle. +# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# If your project uses WebView with JS, uncomment the following +# and specify the fully qualified class name to the JavaScript interface +# class: +#-keepclassmembers class fqcn.of.javascript.interface.for.webview { +# public *; +#} + +# Uncomment this to preserve the line number information for +# debugging stack traces. +#-keepattributes SourceFile,LineNumberTable + +# If you keep the line number information, uncomment this to +# hide the original source file name. +#-renamesourcefileattribute SourceFile \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/AndroidManifest.xml b/lite/examples/sound_classification/android_legacy/app/src/main/AndroidManifest.xml new file mode 100644 index 00000000000..19750fc7d32 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/AndroidManifest.xml @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/assets/labels.txt b/lite/examples/sound_classification/android_legacy/app/src/main/assets/labels.txt new file mode 100644 index 00000000000..16b5383ae96 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/assets/labels.txt @@ -0,0 +1,3 @@ +0 snap +1 _background_noise_ +2 clap diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/MainActivity.kt 
b/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/MainActivity.kt new file mode 100644 index 00000000000..ce84a8045c2 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/MainActivity.kt @@ -0,0 +1,129 @@ +/* + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.tensorflow.lite.examples.soundclassifier + +import android.Manifest +import android.content.pm.PackageManager +import android.os.Build +import android.os.Bundle +import android.util.Log +import android.view.WindowManager +import androidx.annotation.RequiresApi +import androidx.appcompat.app.AppCompatActivity +import androidx.core.content.ContextCompat +import org.tensorflow.lite.examples.soundclassifier.databinding.ActivityMainBinding + +class MainActivity : AppCompatActivity() { + private val probabilitiesAdapter by lazy { ProbabilitiesAdapter() } + + private lateinit var soundClassifier: SoundClassifier + + override fun onCreate(savedInstanceState: Bundle?) 
{ + super.onCreate(savedInstanceState) + + val binding = ActivityMainBinding.inflate(layoutInflater) + setContentView(binding.root) + + soundClassifier = SoundClassifier(this, SoundClassifier.Options()).also { + it.lifecycleOwner = this + } + + with(binding) { + recyclerView.apply { + setHasFixedSize(true) + adapter = probabilitiesAdapter.apply { + labelList = soundClassifier.labelList + } + } + + keepScreenOn(inputSwitch.isChecked) + inputSwitch.setOnCheckedChangeListener { _, isChecked -> + soundClassifier.isPaused = !isChecked + keepScreenOn(isChecked) + } + + overlapFactorSlider.value = soundClassifier.overlapFactor + overlapFactorSlider.addOnChangeListener { _, value, _ -> + soundClassifier.overlapFactor = value + } + } + + soundClassifier.probabilities.observe(this) { resultMap -> + if (resultMap.isEmpty() || resultMap.size > soundClassifier.labelList.size) { + Log.w(TAG, "Invalid size of probability output! (size: ${resultMap.size})") + return@observe + } + probabilitiesAdapter.probabilityMap = resultMap + probabilitiesAdapter.notifyDataSetChanged() + } + + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { + requestMicrophonePermission() + } else { + soundClassifier.start() + } + } + + override fun onTopResumedActivityChanged(isTopResumedActivity: Boolean) { + // Handles "top" resumed event on multi-window environment + if (isTopResumedActivity) { + soundClassifier.start() + } else { + soundClassifier.stop() + } + } + + override fun onRequestPermissionsResult( + requestCode: Int, + permissions: Array, + grantResults: IntArray + ) { + if (requestCode == REQUEST_RECORD_AUDIO) { + if (grantResults.isNotEmpty() && grantResults[0] == PackageManager.PERMISSION_GRANTED) { + Log.i(TAG, "Audio permission granted :)") + soundClassifier.start() + } else { + Log.e(TAG, "Audio permission not granted :(") + } + } + } + + @RequiresApi(Build.VERSION_CODES.M) + private fun requestMicrophonePermission() { + if (ContextCompat.checkSelfPermission( + this, + 
Manifest.permission.RECORD_AUDIO + ) == PackageManager.PERMISSION_GRANTED + ) { + soundClassifier.start() + } else { + requestPermissions(arrayOf(Manifest.permission.RECORD_AUDIO), REQUEST_RECORD_AUDIO) + } + } + + private fun keepScreenOn(enable: Boolean) = + if (enable) { + window.addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON) + } else { + window.clearFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON) + } + + companion object { + const val REQUEST_RECORD_AUDIO = 1337 + private const val TAG = "AudioDemo" + } +} diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/ProbabilitiesAdapter.kt b/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/ProbabilitiesAdapter.kt new file mode 100644 index 00000000000..7f9fbf51be4 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/ProbabilitiesAdapter.kt @@ -0,0 +1,72 @@ +/* + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.tensorflow.lite.examples.soundclassifier + +import android.animation.ObjectAnimator +import android.content.res.ColorStateList +import android.view.LayoutInflater +import android.view.ViewGroup +import android.view.animation.AccelerateDecelerateInterpolator +import androidx.recyclerview.widget.RecyclerView +import org.tensorflow.lite.examples.soundclassifier.databinding.ItemProbabilityBinding + +internal class ProbabilitiesAdapter : RecyclerView.Adapter() { + var labelList = emptyList() + var probabilityMap = mapOf() + + override fun onCreateViewHolder(parent: ViewGroup, viewType: Int): ViewHolder { + val binding = + ItemProbabilityBinding.inflate(LayoutInflater.from(parent.context), parent, false) + return ViewHolder(binding) + } + + override fun onBindViewHolder(holder: ViewHolder, position: Int) { + val label = labelList[position] + val probability = probabilityMap[label] ?: 0f + holder.bind(position, label, probability) + } + + override fun getItemCount() = labelList.size + + class ViewHolder(private val binding: ItemProbabilityBinding) : + RecyclerView.ViewHolder(binding.root) { + fun bind(position: Int, label: String, probability: Float) { + with(binding) { + labelTextView.text = label + progressBar.progressBackgroundTintList = progressColorPairList[position % 3].first + progressBar.progressTintList = progressColorPairList[position % 3].second + + val newValue = (probability * 100).toInt() + // If you don't want to animate, you can write like `progressBar.progress = newValue`. 
+ val animation = + ObjectAnimator.ofInt(progressBar, "progress", progressBar.progress, newValue) + animation.duration = 100 + animation.interpolator = AccelerateDecelerateInterpolator() + animation.start() + } + } + + companion object { + /** List of pairs of background tint and progress tint */ + private val progressColorPairList = listOf( + ColorStateList.valueOf(0xfff9e7e4.toInt()) to ColorStateList.valueOf(0xffd97c2e.toInt()), + ColorStateList.valueOf(0xfff7e3e8.toInt()) to ColorStateList.valueOf(0xffc95670.toInt()), + ColorStateList.valueOf(0xffecf0f9.toInt()) to ColorStateList.valueOf(0xff714Fe7.toInt()), + ) + } + } +} diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/SoundClassifier.kt b/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/SoundClassifier.kt new file mode 100644 index 00000000000..e54c45981ad --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/java/org/tensorflow/lite/examples/soundclassifier/SoundClassifier.kt @@ -0,0 +1,419 @@ +/* + * Copyright 2021 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.tensorflow.lite.examples.soundclassifier + +import android.content.Context +import android.media.AudioFormat +import android.media.AudioRecord +import android.media.MediaRecorder +import android.os.SystemClock +import android.util.Log +import androidx.annotation.MainThread +import androidx.lifecycle.DefaultLifecycleObserver +import androidx.lifecycle.LifecycleOwner +import androidx.lifecycle.LiveData +import androidx.lifecycle.MutableLiveData +import java.io.BufferedReader +import java.io.IOException +import java.io.InputStreamReader +import java.nio.FloatBuffer +import java.util.Locale +import java.util.Timer +import java.util.TimerTask +import kotlin.concurrent.scheduleAtFixedRate +import kotlin.math.sin +import org.tensorflow.lite.Interpreter +import org.tensorflow.lite.support.common.FileUtil + +/** + * Performs classification on sound. + * + *

The API supports models which accept sound input via {@code AudioRecord} and one classification output tensor. + * The output of the recognition is emitted as LiveData of Map. + * + */ +class SoundClassifier(context: Context, private val options: Options = Options()) : + DefaultLifecycleObserver { + class Options constructor( + /** Path of the converted model label file, relative to the assets/ directory. */ + val metadataPath: String = "labels.txt", + /** Path of the converted .tflite file, relative to the assets/ directory. */ + val modelPath: String = "sound_classifier.tflite", + /** The required audio sample rate in Hz. */ + val sampleRate: Int = 44_100, + /** How many milliseconds to sleep between successive audio sample pulls. */ + val audioPullPeriod: Long = 50L, + /** Number of warm up runs to do after loading the TFLite model. */ + val warmupRuns: Int = 3, + /** Number of points in average to reduce noise. */ + val pointsInAverage: Int = 10, + /** Overlap factor of recognition period */ + var overlapFactor: Float = 0.8f, + /** Probability value above which a class is labeled as active (i.e., detected) the display. */ + var probabilityThreshold: Float = 0.3f, + ) + + var isRecording: Boolean = false + private set + + /** As the result of sound classification, this value emits map of probabilities */ + val probabilities: LiveData> + get() = _probabilities + private val _probabilities = MutableLiveData>() + + var isClosed: Boolean = true + private set + + /** + * LifecycleOwner instance to deal with RESUME, PAUSE and DESTROY events automatically. + * You can also handle those events by calling `start()`, `stop()` and `close()` methods + * manually. + */ + var lifecycleOwner: LifecycleOwner? 
= null + @MainThread + set(value) { + if (field === value) return + field?.lifecycle?.removeObserver(this) + field = value?.also { + it.lifecycle.addObserver(this) + } + } + + /** Overlap factor of recognition period */ + var overlapFactor: Float + get() = options.overlapFactor + set(value) { + options.overlapFactor = value.also { + recognitionPeriod = (1000L * (1 - value)).toLong() + } + } + + /** Probability value above which a class is labeled as active (i.e., detected) the display. */ + var probabilityThreshold: Float + get() = options.probabilityThreshold + set(value) { + options.probabilityThreshold = value + } + + /** Paused by user */ + var isPaused: Boolean = false + set(value) { + field = value + if (value) stop() else start() + } + + /** Names of the model's output classes. */ + lateinit var labelList: List + private set + + /** How many milliseconds between consecutive model inference calls. */ + private var recognitionPeriod = (1000L * (1 - overlapFactor)).toLong() + + /** The TFLite interpreter instance. */ + private lateinit var interpreter: Interpreter + + /** Audio length (in # of PCM samples) required by the TFLite model. */ + private var modelInputLength = 0 + + /** Number of output classes of the TFLite model. */ + private var modelNumClasses = 0 + + /** Used to hold the real-time probabilities predicted by the model for the output classes. */ + private lateinit var predictionProbs: FloatArray + + /** Latest prediction latency in milliseconds. */ + private var latestPredictionLatencyMs = 0f + + private var recognitionTask: TimerTask? = null + + /** Used to record audio samples. */ + private lateinit var audioRecord: AudioRecord + + /** Buffer that holds audio PCM sample that are fed to the TFLite model for inference. 
*/ + private lateinit var inputBuffer: FloatBuffer + + init { + loadLabels(context) + setupInterpreter(context) + warmUpModel() + } + + override fun onResume(owner: LifecycleOwner) = start() + + override fun onPause(owner: LifecycleOwner) = stop() + + /** + * Starts sound classification, which triggers running of + * `recordingThread` and `recognitionThread`. + */ + fun start() { + if (!isPaused) { + startAudioRecord() + } + } + + /** + * Stops sound classification, which triggers interruption of + * `recognitionThread`. + */ + fun stop() { + if (isClosed || !isRecording) return + recognitionTask?.cancel() + + audioRecord.stop() + isRecording = false + + _probabilities.postValue(labelList.associateWith { 0f }) + } + + fun close() { + stop() + + if (isClosed) return + interpreter.close() + + isClosed = true + } + + /** Retrieve labels from "labels.txt" file */ + private fun loadLabels(context: Context) { + try { + val reader = + BufferedReader(InputStreamReader(context.assets.open(options.metadataPath))) + val wordList = mutableListOf() + reader.useLines { lines -> + lines.forEach { + wordList.add(it.split(" ").last()) + } + } + labelList = wordList.map { it.toTitleCase() } + } catch (e: IOException) { + Log.e(TAG, "Failed to read model ${options.metadataPath}: ${e.message}") + } + } + + private fun setupInterpreter(context: Context) { + interpreter = try { + val tfliteBuffer = FileUtil.loadMappedFile(context, options.modelPath) + Log.i(TAG, "Done creating TFLite buffer from ${options.modelPath}") + Interpreter(tfliteBuffer, Interpreter.Options()) + } catch (e: IOException) { + Log.e(TAG, "Failed to load TFLite model - ${e.message}") + return + } + + // Inspect input and output specs. 
+ val inputShape = interpreter.getInputTensor(0).shape() + Log.i(TAG, "TFLite model input shape: ${inputShape.contentToString()}") + modelInputLength = inputShape[1] + + val outputShape = interpreter.getOutputTensor(0).shape() + Log.i(TAG, "TFLite output shape: ${outputShape.contentToString()}") + modelNumClasses = outputShape[1] + if (modelNumClasses != labelList.size) { + Log.e( + TAG, + "Mismatch between metadata number of classes (${labelList.size})" + + " and model output length ($modelNumClasses)" + ) + } + // Fill the array with NaNs initially. + predictionProbs = FloatArray(modelNumClasses) { Float.NaN } + + inputBuffer = FloatBuffer.allocate(modelInputLength) + } + + private fun warmUpModel() { + generateDummyAudioInput(inputBuffer) + for (n in 0 until options.warmupRuns) { + val t0 = SystemClock.elapsedRealtimeNanos() + + // Create input and output buffers. + val outputBuffer = FloatBuffer.allocate(modelNumClasses) + inputBuffer.rewind() + outputBuffer.rewind() + interpreter.run(inputBuffer, outputBuffer) + + Log.i( + TAG, + "Switches: Done calling interpreter.run(): %s (%.6f ms)".format( + outputBuffer.array().contentToString(), + (SystemClock.elapsedRealtimeNanos() - t0) / NANOS_IN_MILLIS + ) + ) + } + } + + private fun generateDummyAudioInput(inputBuffer: FloatBuffer) { + val twoPiTimesFreq = 2 * Math.PI.toFloat() * 1000f + for (i in 0 until modelInputLength) { + val x = i.toFloat() / (modelInputLength - 1) + inputBuffer.put(i, sin(twoPiTimesFreq * x.toDouble()).toFloat()) + } + } + + /** Start recording and triggers recognition. 
*/ + @Synchronized + private fun startAudioRecord() { + if (isRecording) return + setupAudioRecord() + isClosed = false + isRecording = true + } + + private fun setupAudioRecord() { + var bufferSize = AudioRecord.getMinBufferSize( + options.sampleRate, + AudioFormat.CHANNEL_IN_MONO, + AudioFormat.ENCODING_PCM_16BIT + ) + Log.i(TAG, "min buffer size = $bufferSize") + if (bufferSize == AudioRecord.ERROR || bufferSize == AudioRecord.ERROR_BAD_VALUE) { + bufferSize = options.sampleRate * 2 + Log.w(TAG, "bufferSize has error or bad value") + } + // The buffer of AudioRecord should be larger than what model requires. + val modelRequiredBufferSize = 2 * modelInputLength * Short.SIZE_BYTES + if (bufferSize < modelRequiredBufferSize) { + bufferSize = modelRequiredBufferSize + } + Log.i(TAG, "bufferSize = $bufferSize") + audioRecord = AudioRecord( + // including MIC, UNPROCESSED, and CAMCORDER. + MediaRecorder.AudioSource.VOICE_RECOGNITION, + options.sampleRate, + AudioFormat.CHANNEL_IN_MONO, + AudioFormat.ENCODING_PCM_16BIT, + bufferSize + ) + if (audioRecord.state != AudioRecord.STATE_INITIALIZED) { + Log.e(TAG, "AudioRecord failed to initialize") + return + } + Log.i(TAG, "Successfully initialized AudioRecord") + + audioRecord.startRecording() + Log.i(TAG, "Successfully started AudioRecord recording") + + // Start recognition (model inference) thread. + startRecognition() + } + + private fun loadAudio(audioBuffer: ShortArray): Int { + when ( + val loadedSamples = audioRecord.read( + audioBuffer, 0, audioBuffer.size, AudioRecord.READ_NON_BLOCKING + ) + ) { + AudioRecord.ERROR_INVALID_OPERATION -> { + Log.w(TAG, "AudioRecord.ERROR_INVALID_OPERATION") + } + AudioRecord.ERROR_BAD_VALUE -> { + Log.w(TAG, "AudioRecord.ERROR_BAD_VALUE") + } + AudioRecord.ERROR_DEAD_OBJECT -> { + Log.w(TAG, "AudioRecord.ERROR_DEAD_OBJECT") + } + AudioRecord.ERROR -> { + Log.w(TAG, "AudioRecord.ERROR") + } + else -> { + return loadedSamples + } + } + // No new sample was loaded. 
+ return 0 + } + + private fun startRecognition() { + if (modelInputLength <= 0 || modelNumClasses <= 0) { + Log.e(TAG, "Switches: Cannot start recognition because model is unavailable.") + return + } + + val circularBuffer = ShortArray(modelInputLength) + + var j = 0 // Indices for the circular buffer next write + + recognitionTask = Timer().scheduleAtFixedRate(recognitionPeriod, recognitionPeriod) task@{ + val outputBuffer = FloatBuffer.allocate(modelNumClasses) + val recordingBuffer = ShortArray(modelInputLength) + + // Load new audio samples + val sampleCounts = loadAudio(recordingBuffer) + if (sampleCounts == 0) { + return@task + } + + // Copy new data into the circular buffer + for (i in 0 until sampleCounts) { + circularBuffer[j] = recordingBuffer[i] + j = (j + 1) % circularBuffer.size + } + + // Feed data to the input buffer. + var samplesAreAllZero = true + for (i in 0 until modelInputLength) { + val s = if (i > options.pointsInAverage) { + ((i - options.pointsInAverage + 1)..i).map { + circularBuffer[(j + it) % modelInputLength] + } + .average() + } else { + circularBuffer[(i + j) % modelInputLength] + } + if (samplesAreAllZero && s.toInt() != 0) { + samplesAreAllZero = false + } + inputBuffer.put(i, s.toFloat()) + } + + if (samplesAreAllZero) { + Log.w(TAG, "No audio input: All audio samples are zero!") + return@task + } + val t0 = SystemClock.elapsedRealtimeNanos() + inputBuffer.rewind() + outputBuffer.rewind() + interpreter.run(inputBuffer, outputBuffer) + outputBuffer.rewind() + outputBuffer.get(predictionProbs) // Copy data to predictionProbs. 
+ + val probList = predictionProbs.map { + if (it > probabilityThreshold) it else 0f + } + Log.i(TAG, "inference result: $probList") + _probabilities.postValue(labelList.zip(probList).toMap()) + + latestPredictionLatencyMs = + ((SystemClock.elapsedRealtimeNanos() - t0) / 1e6).toFloat() + } + } + + companion object { + private const val TAG = "SoundClassifier" + + /** Number of nanoseconds in a millisecond */ + private const val NANOS_IN_MILLIS = 1_000_000.toDouble() + } +} + +private fun String.toTitleCase() = + splitToSequence("_") + .map { it.capitalize(Locale.ROOT) } + .joinToString(" ") + .trim() diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/res/layout/activity_main.xml b/lite/examples/sound_classification/android_legacy/app/src/main/res/layout/activity_main.xml new file mode 100644 index 00000000000..92f47758929 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/res/layout/activity_main.xml @@ -0,0 +1,111 @@ + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/res/layout/item_probability.xml b/lite/examples/sound_classification/android_legacy/app/src/main/res/layout/item_probability.xml new file mode 100644 index 00000000000..49f2aeab7ee --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/res/layout/item_probability.xml @@ -0,0 +1,51 @@ + + + + + + + \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/res/values/colors.xml b/lite/examples/sound_classification/android_legacy/app/src/main/res/values/colors.xml new file mode 100644 index 00000000000..8c4017f0139 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/res/values/colors.xml @@ -0,0 +1,16 @@ + + + #BDBDBD + #9E9E9E + #757575 + #616161 + #424242 + #212121 + + #344955 + #232F34 + #4A6572 + #F9AA33 + + #5d85c8 + \ No newline at end of 
file diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/res/values/strings.xml b/lite/examples/sound_classification/android_legacy/app/src/main/res/values/strings.xml new file mode 100644 index 00000000000..abfa5836c98 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/res/values/strings.xml @@ -0,0 +1,6 @@ + + TFL Sound Classifier + Input: + Overlap Factor + TensorFlow Lite Logo + \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/app/src/main/res/values/styles.xml b/lite/examples/sound_classification/android_legacy/app/src/main/res/values/styles.xml new file mode 100644 index 00000000000..d1e1e6c6720 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/app/src/main/res/values/styles.xml @@ -0,0 +1,10 @@ + + + + \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/build.gradle b/lite/examples/sound_classification/android_legacy/build.gradle new file mode 100644 index 00000000000..ccbe3ced507 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/build.gradle @@ -0,0 +1,30 @@ +// Top-level build file where you can add configuration options common to all sub-projects/modules. 
+buildscript { + ext { + kotlin_version = "1.4.10" + } + repositories { + google() + mavenCentral() + } + dependencies { + classpath 'com.android.tools.build:gradle:4.0.1' + classpath 'de.undercouch:gradle-download-task:4.0.2' // to download model + classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" + } +} + +allprojects { + repositories { + google() + mavenCentral() + maven { + name 'ossrh-snapshot' + url 'http://oss.sonatype.org/content/repositories/snapshots' + } + } +} + +task clean(type: Delete) { + delete rootProject.buildDir +} \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/gradle.properties b/lite/examples/sound_classification/android_legacy/gradle.properties new file mode 100644 index 00000000000..30dbeb4321c --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/gradle.properties @@ -0,0 +1,21 @@ +# Project-wide Gradle settings. +# IDE (e.g. Android Studio) users: +# Gradle settings configured through the IDE *will override* +# any settings specified in this file. +# For more details on how to configure your build environment visit +# http://www.gradle.org/docs/current/userguide/build_environment.html +# Specifies the JVM arguments used for the daemon process. +# The setting is particularly useful for tweaking memory settings. +org.gradle.jvmargs=-Xmx2048m +# When configured, Gradle will run in incubating parallel mode. +# This option should only be used with decoupled projects. 
More details, visit +# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects +# org.gradle.parallel=true +# AndroidX package structure to make it clearer which packages are bundled with the +# Android operating system, and which are packaged with your app's APK +# https://developer.android.com/topic/libraries/support-library/androidx-rn +android.useAndroidX=true +# Automatically convert third-party libraries to use AndroidX +android.enableJetifier=false +# Kotlin code style for this project: "official" or "obsolete": +kotlin.code.style=official \ No newline at end of file diff --git a/lite/examples/sound_classification/android_legacy/settings.gradle b/lite/examples/sound_classification/android_legacy/settings.gradle new file mode 100644 index 00000000000..0a87cd63b06 --- /dev/null +++ b/lite/examples/sound_classification/android_legacy/settings.gradle @@ -0,0 +1,2 @@ +rootProject.name = "TFLite Sound Classifier Demo App" +include ':app' \ No newline at end of file