You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Browser plays the audio sent by server (This works fine)
On click of the (Stop) button on browser, the renegotiation happens, and on server side, we stop the sending track (which sends a.mp3) and add a new track (which should send b.mp3)
I'm doing re-negotiation as seen in client.js code below:
negotiate(1)
which sends offer & returns answer. Interesting thing is the first time negotiation happens, things work fine, and then 4th step onwards, the ICE failing state starts coming, and as a result I cannot hear audio b.mp3 being played on browser.
server.py
import argparse
import asyncio
import json
import logging
import os
import ssl
import uuid
import socketio
from aiohttp import web
from aiortc import MediaStreamTrack, RTCPeerConnection, RTCSessionDescription, RTCRtpSender
from aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder, MediaRelay
from aiortc.mediastreams import MediaStreamError
from av import AudioFrame
import asyncio
import fractions
import time
from av.frame import Frame
from aiortc.mediastreams import VideoStreamTrack
from aiortc.rtcrtpreceiver import RemoteStreamTrack
ROOT = os.path.dirname(__file__)
logger = logging.getLogger("pc")
pcs = set()
relay = MediaRelay()
app = web.Application()
# Create a Socket.IO server instance
sio = socketio.AsyncServer(async_mode='aiohttp')
sio.attach(app)
async def removeAndAddTrack(pc: RTCPeerConnection):
print("Came inside removeAndAddTrack")
sender:RTCRtpSender = pc.getSenders()[0]
await sender.stop()
print("Stopped the sender")
pc.addTrack(MediaPlayer(os.path.join(ROOT, "sample-9s.wav")).audio)
print("Added a new track")
async def index(request):
content = open(os.path.join(ROOT, "index.html"), "r").read()
return web.Response(content_type="text/html", text=content)
async def javascript(request):
content = open(os.path.join(ROOT, "client.js"), "r").read()
return web.Response(content_type="application/javascript", text=content)
@sio.event
async def renegotiate(sid, offer):
session = await sio.get_session(sid)
pc:RTCPeerConnection = session["peer"]
sender = next((sender for sender in pc.getSenders()), None)
print("RENEGOTIATE ----------")
if sender:
print("Came inside removeAndAddTrack")
sender:RTCRtpSender = pc.getSenders()[0]
await sender.stop()
print("Stopped the sender")
pc.addTrack(MediaPlayer(os.path.join(ROOT, "b.mp3")).audio)
# Set local & remote session
offersdp = RTCSessionDescription(sdp=offer, type="offer")
await pc.setRemoteDescription(offersdp)
print("OFFER : ------------")
print(offersdp)
answer = await pc.createAnswer()
await pc.setLocalDescription(answer)
print("ANSWER : ------------")
print(answer)
await sio.emit("answer", answer.sdp)
@sio.event
async def offer(sid, offer_sdp):
offer = RTCSessionDescription(sdp=offer_sdp, type='offer')
pc = RTCPeerConnection()
await sio.save_session(sid, {"peer" : pc})
pc_id = "PeerConnection(%s)" % uuid.uuid4()
pcs.add(pc)
# Audio Recorder for incoming audio track
audio_rec = None
# Video Recorder for incoming video track
video_rec = None
# All the incoming audio tracks
incoming_audio_tracks = []
def log_info(msg, *args):
logger.info(pc_id + " " + msg, *args)
#if args.record_to:
if True:
# Record the incoming track stream to wav file locally
audio_rec = MediaRecorder(os.path.join(ROOT, "recording-step-1.mp3"))
else:
audio_rec = MediaBlackhole()
# Initialise mediarecorder
video_rec = MediaRecorder(
os.path.join(ROOT, "video-recording.mp4"))
@pc.on("connectionstatechange")
async def on_connectionstatechange():
log_info("Connection state is %s", pc.connectionState)
if pc.connectionState == "failed":
await pc.close()
pcs.discard(pc)
@pc.on("track")
def on_track(track):
log_info("Track %s received", track.kind)
if track.kind == "audio":
incoming_audio_tracks.append(track)
# Start to send silence empty audio in outgoing track stream
pc.addTrack(MediaPlayer(os.path.join(ROOT, "a.mp3")).audio)
# Start recording incoming microphone audio stream into recording wav file
audio_rec.addTrack(track)
@track.on("ended")
async def on_ended():
log_info("Track %s ended", track.kind)
if audio_rec:
await audio_rec.stop()
if video_rec:
await video_rec.stop()
# handle offer
await pc.setRemoteDescription(offer)
print("Added Remote Desc")
if audio_rec:
await audio_rec.start()
if video_rec:
await video_rec.start()
# send answer
answer = await pc.createAnswer()
await pc.setLocalDescription(answer)
print("Adding Local Desc")
print(answer)
await sio.emit("answer", pc.localDescription.sdp)
async def on_shutdown(app):
# close peer connections
coros = [pc.close() for pc in pcs]
await asyncio.gather(*coros)
pcs.clear()
# Socket.IO event handler for the 'connect' event
@sio.on('connect')
async def connect(sid, environ):
print('Connected:', sid)
# Socket.IO event handler for the 'disconnect' event
@sio.on('disconnect')
async def disconnect(sid):
print('Disconnected:', sid)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="WebRTC audio / video / data-channels demo"
)
parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)")
parser.add_argument("--key-file", help="SSL key file (for HTTPS)")
parser.add_argument(
"--host", default="0.0.0.0", help="Host for HTTP server (default: 0.0.0.0)"
)
parser.add_argument(
"--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
)
parser.add_argument("--record-to", help="Write received media to a file.")
parser.add_argument("--verbose", "-v", action="count")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if args.cert_file:
ssl_context = ssl.SSLContext()
ssl_context.load_cert_chain(args.cert_file, args.key_file)
else:
ssl_context = None
app.on_shutdown.append(on_shutdown)
app.router.add_get("/", index)
app.router.add_get("/client.js", javascript)
#app.router.add_post("/offer", offer)
web.run_app(
app, access_log=None, host=args.host, port=args.port, ssl_context=ssl_context
)
client.js
// get DOM elements
var dataChannelLog = document.getElementById('data-channel'),
iceConnectionLog = document.getElementById('ice-connection-state'),
iceGatheringLog = document.getElementById('ice-gathering-state'),
signalingLog = document.getElementById('signaling-state');
// Initialize Socket.IO connection
var socket = io();
// peer connection
var pc = null;
// data channel
var dc = null, dcInterval = null;
function createPeerConnection() {
var config = {
sdpSemantics: 'unified-plan'
};
if (document.getElementById('use-stun').checked) {
config.iceServers = [{ urls: ['stun:stun.l.google.com:19302'] }];
}
pc = new RTCPeerConnection(config);
// register some listeners to help debugging
pc.addEventListener('icegatheringstatechange', () => {
iceGatheringLog.textContent += ' -> ' + pc.iceGatheringState;
}, false);
iceGatheringLog.textContent = pc.iceGatheringState;
pc.addEventListener('iceconnectionstatechange', () => {
iceConnectionLog.textContent += ' -> ' + pc.iceConnectionState;
}, false);
iceConnectionLog.textContent = pc.iceConnectionState;
pc.addEventListener('signalingstatechange', () => {
signalingLog.textContent += ' -> ' + pc.signalingState;
}, false);
signalingLog.textContent = pc.signalingState;
// connect audio / video
pc.addEventListener('track', (evt) => {
console.log("Came in event listener for track event");
if (evt.track.kind == 'audio') {
document.getElementById('audio').srcObject = evt.streams[0];
console.log("Added remote audio stream");
}
});
return pc;
}
function enumerateInputDevices() {
const populateSelect = (select, devices) => {
let counter = 1;
devices.forEach((device) => {
const option = document.createElement('option');
option.value = device.deviceId;
option.text = device.label || ('Device #' + counter);
select.appendChild(option);
counter += 1;
});
};
navigator.mediaDevices.enumerateDevices().then((devices) => {
populateSelect(
document.getElementById('audio-input'),
devices.filter((device) => device.kind == 'audioinput')
);
populateSelect(
document.getElementById('video-input'),
devices.filter((device) => device.kind == 'videoinput')
);
}).catch((e) => {
alert(e);
});
}
function negotiate(again = 0) {
return pc.createOffer().then((offer) => {
return pc.setLocalDescription(offer);
}).then(() => {
// wait for ICE gathering to complete
return new Promise((resolve) => {
if (pc.iceGatheringState === 'complete') {
resolve();
} else {
function checkState() {
if (pc.iceGatheringState === 'complete') {
pc.removeEventListener('icegatheringstatechange', checkState);
resolve();
}
}
pc.addEventListener('icegatheringstatechange', checkState);
}
});
}).then(() => {
var offer = pc.localDescription;
var codec;
codec = document.getElementById('audio-codec').value;
if (codec !== 'default') {
offer.sdp = sdpFilterCodec('audio', codec, offer.sdp);
}
codec = document.getElementById('video-codec').value;
if (codec !== 'default') {
offer.sdp = sdpFilterCodec('video', codec, offer.sdp);
}
document.getElementById('offer-sdp').textContent = offer.sdp;
if (again == 0) {
socket.emit("offer", offer.sdp);
} else {
socket.emit("renegotiate", offer.sdp)
}
}).catch((e) => {
alert(e);
});
}
socket.on('answer', function (sdp_answer) {
console.log("----- Answer ------")
console.log(sdp_answer)
document.getElementById('answer-sdp').textContent = sdp_answer;
pc.setRemoteDescription({type:"answer", sdp: sdp_answer});
});
// Handle connection event
socket.on('connect', () => {
console.log('Connected to server');
});
// Handle disconnection event
socket.on('disconnect', () => {
console.log('Disconnected from server');
});
function start() {
document.getElementById('start').style.display = 'none';
pc = createPeerConnection();
pc.onnegotiationneeded = e => {
console.log("Inside on negotiation needed");
if (pc.signalingState != "stable") return;
}
var time_start = null;
const current_stamp = () => {
if (time_start === null) {
time_start = new Date().getTime();
return 0;
} else {
return new Date().getTime() - time_start;
}
};
if (document.getElementById('use-datachannel').checked) {
var parameters = JSON.parse(document.getElementById('datachannel-parameters').value);
dc = pc.createDataChannel('chat', parameters);
dc.addEventListener('close', () => {
clearInterval(dcInterval);
dataChannelLog.textContent += '- close\n';
});
dc.addEventListener('open', () => {
dataChannelLog.textContent += '- open\n';
dcInterval = setInterval(() => {
var message = 'ping ' + current_stamp();
dataChannelLog.textContent += '> ' + message + '\n';
dc.send(message);
}, 1000);
});
dc.addEventListener('message', (evt) => {
dataChannelLog.textContent += '< ' + evt.data + '\n';
if (evt.data.substring(0, 4) === 'pong') {
var elapsed_ms = current_stamp() - parseInt(evt.data.substring(5), 10);
dataChannelLog.textContent += ' RTT ' + elapsed_ms + ' ms\n';
}
if (evt.data.substring(0, 10) === 'trackended') {
// This is received from python when the audio wav file streamed
// is completed
}
if (evt.data.substring(0, 11) === 'renegotiate') {
const offer = evt.data.substring(11);
pc.createAnswer()
.then((answer) => {
return pc.setRemoteDescription(new RTCSessionDescription({ type: 'offer', sdp: offer }))
.then(() => {
// Set local description first
return pc.setLocalDescription(answer);
})
.then(() => {
const message = 'answer' + pc.localDescription.sdp;
dc.send(message);
});
})
.catch((error) => {
console.log(offer);
console.error('Failed to renegotiate:', error);
});
}
});
}
// Build media constraints.
const constraints = {
audio: false,
video: false
};
if (document.getElementById('use-audio').checked) {
const audioConstraints = {};
const device = document.getElementById('audio-input').value;
if (device) {
audioConstraints.deviceId = { exact: device };
}
constraints.audio = Object.keys(audioConstraints).length ? audioConstraints : true;
}
if (document.getElementById('use-video').checked) {
const videoConstraints = {};
const device = document.getElementById('video-input').value;
if (device) {
videoConstraints.deviceId = { exact: device };
}
const resolution = document.getElementById('video-resolution').value;
if (resolution) {
const dimensions = resolution.split('x');
videoConstraints.width = parseInt(dimensions[0], 0);
videoConstraints.height = parseInt(dimensions[1], 0);
}
// Specify frame rate here
videoConstraints.frameRate = { ideal: 30, max: 30 };
constraints.video = Object.keys(videoConstraints).length ? videoConstraints : true;
}
// Acquire media and start negociation.
if (constraints.audio || constraints.video) {
if (constraints.video) {
document.getElementById('media').style.display = 'block';
}
navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
document.getElementById('video').srcObject = stream;
stream.getTracks().forEach((track) => {
pc.addTrack(track, stream);
});
return negotiate();
}, (err) => {
alert('Could not acquire media: ' + err);
});
} else {
negotiate();
}
document.getElementById('stop').style.display = 'inline-block';
}
function restart() {
document.getElementById('restart').style.display = 'none';
}
function stop() {
document.getElementById('stop').style.display = 'none';
negotiate(1)
document.getElementById('restart').style.display = 'inline-block';
}
function sdpFilterCodec(kind, codec, realSdp) {
var allowed = []
var rtxRegex = new RegExp('a=fmtp:(\\d+) apt=(\\d+)\r$');
var codecRegex = new RegExp('a=rtpmap:([0-9]+) ' + escapeRegExp(codec))
var videoRegex = new RegExp('(m=' + kind + ' .*?)( ([0-9]+))*\\s*$')
var lines = realSdp.split('\n');
var isKind = false;
for (var i = 0; i < lines.length; i++) {
if (lines[i].startsWith('m=' + kind + ' ')) {
isKind = true;
} else if (lines[i].startsWith('m=')) {
isKind = false;
}
if (isKind) {
var match = lines[i].match(codecRegex);
if (match) {
allowed.push(parseInt(match[1]));
}
match = lines[i].match(rtxRegex);
if (match && allowed.includes(parseInt(match[2]))) {
allowed.push(parseInt(match[1]));
}
}
}
var skipRegex = 'a=(fmtp|rtcp-fb|rtpmap):([0-9]+)';
var sdp = '';
isKind = false;
for (var i = 0; i < lines.length; i++) {
if (lines[i].startsWith('m=' + kind + ' ')) {
isKind = true;
} else if (lines[i].startsWith('m=')) {
isKind = false;
}
if (isKind) {
var skipMatch = lines[i].match(skipRegex);
if (skipMatch && !allowed.includes(parseInt(skipMatch[2]))) {
continue;
} else if (lines[i].match(videoRegex)) {
sdp += lines[i].replace(videoRegex, '$1 ' + allowed.join(' ')) + '\n';
} else {
sdp += lines[i] + '\n';
}
} else {
sdp += lines[i] + '\n';
}
}
return sdp;
}
function escapeRegExp(string) {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}
enumerateInputDevices();
I have implemented audio transfer over webrtc using aiortc on python backend (Heavily inspired from server folder in examples). The flow is like so:
pc.addTrack(MediaPlayer(os.path.join(ROOT, "a.mp3")).audio)
pc.addTrack(MediaPlayer(os.path.join(ROOT, "b.mp3")).audio)
All works fine until step 4, at which point I get the following error on server:
I'm doing re-negotiation as seen in client.js code below:
which sends offer & returns answer. Interesting thing is the first time negotiation happens, things work fine, and then 4th step onwards, the ICE failing state starts coming, and as a result I cannot hear audio b.mp3 being played on browser.
server.py
client.js
index.html
On step 4, the SDP for offer and answer look like so:
Offer:
SDP Answer:
The text was updated successfully, but these errors were encountered: