/*
* libjingle
* Copyright 2004--2011, Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// shhhhh{
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
// shhhhh}
#ifdef HAVE_GIPS
#include "talk/session/phone/gipsmediaengine.h"
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>
#include "talk/base/base64.h"
#include "talk/base/byteorder.h"
#include "talk/base/common.h"
#include "talk/base/helpers.h"
#include "talk/base/logging.h"
#include "talk/base/stringencode.h"
#include "talk/base/stringutils.h"
#ifdef WIN32
#include <objbase.h> // NOLINT
#endif
namespace cricket {
// For Linux/Mac, using the default device is done by specifying index 0 for
// VoE 4.0 and not -1 (which was the case for VoE 3.5).
//
// On Windows Vista and newer, Microsoft introduced the concept of "Default
// Communications Device". This means that there are two types of default
// devices (old Wave Audio style default and Default Communications Device).
//
// On Windows systems which only support Wave Audio style default, use either
// -1 or 0 to select the default device.
//
// On Windows systems which support both "Default Communication Device" and
// old Wave Audio style default, use -1 for Default Communications Device and
// -2 for Wave Audio style default, which is what we want to use for clips.
// It's not clear yet whether the -2 index is handled properly on other OSes.
#ifdef WIN32
static const int kDefaultAudioDeviceId = -1;
static const int kDefaultSoundclipDeviceId = -2;
#else
static const int kDefaultAudioDeviceId = 0;
#endif
// extension header for audio levels, as defined in
// http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-03
static const char kRtpAudioLevelHeaderExtension[] =
"urn:ietf:params:rtp-hdrext:ssrc-audio-level";
static void GipsLogMultiline(talk_base::LoggingSeverity sev, char* text) {
const char* delim = "\r\n";
for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
LOG_V(sev) << tok;
}
}
// GipsVoiceEngine
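// Table of codec names/clock rates we know about, in descending order of
// local preference. GetCodecPreference() below maps entries nearer the top of
// this table to larger preference values.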
const GipsVoiceEngine::CodecPref GipsVoiceEngine::kCodecPrefs[] = {
{ "ISAC", 16000 },
{ "ISAC", 32000 },
{ "ISACLC", 16000 },
{ "speex", 16000 },
{ "IPCMWB", 16000 },
{ "G722", 16000 },
{ "iLBC", 8000 },
{ "speex", 8000 },
{ "GSM", 8000 },
{ "EG711U", 8000 },
{ "EG711A", 8000 },
{ "PCMU", 8000 },
{ "PCMA", 8000 },
{ "CN", 32000 },
{ "CN", 16000 },
{ "CN", 8000 },
{ "red", 8000 },
{ "telephone-event", 8000 },
};
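// GipsSoundclipMedia plays sound clips on a dedicated GIPS channel owned by
// the soundclip engine instance (gips_sc_). The channel is used for local
// playout only; it never sends or receives network audio.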
class GipsSoundclipMedia : public SoundclipMedia {
public:
explicit GipsSoundclipMedia(GipsVoiceEngine *engine)
: engine_(engine), gips_channel_(-1) {
engine_->RegisterSoundclip(this);
}
virtual ~GipsSoundclipMedia() {
engine_->UnregisterSoundclip(this);
if (gips_channel_ != -1) {
if (engine_->gips_sc()->base()->GIPSVE_DeleteChannel(gips_channel_)
== -1) {
LOG_GIPSERR1(DeleteChannel, gips_channel_);
}
}
}
bool Init() {
gips_channel_ = engine_->gips_sc()->base()->GIPSVE_CreateChannel();
if (gips_channel_ == -1) {
LOG_GIPSERR0(CreateChannel);
return false;
}
return true;
}
bool Enable() {
if (engine_->gips_sc()->base()->GIPSVE_StartPlayout(gips_channel_) == -1) {
LOG_GIPSERR1(StartPlayout, gips_channel_);
return false;
}
return true;
}
bool Disable() {
if (engine_->gips_sc()->base()->GIPSVE_StopPlayout(gips_channel_) == -1) {
LOG_GIPSERR1(StopPlayout, gips_channel_);
return false;
}
return true;
}
virtual bool PlaySound(const char *buf, int len, int flags) {
// Must stop playing the current sound (if any), because we are about to
// modify the stream.
if (engine_->gips_sc()->file()->GIPSVE_StopPlayingFileLocally(gips_channel_)
== -1) {
LOG_GIPSERR1(StopPlayingFileLocally, gips_channel_);
return false;
}
if (buf) {
stream_.reset(new GipsSoundclipStream(buf, len));
stream_->set_loop((flags & SF_LOOP) != 0);
stream_->Rewind();
// Play it.
if (engine_->gips_sc()->file()->GIPSVE_StartPlayingFileLocally(
gips_channel_, stream_.get()) == -1) {
LOG_GIPSERR2(StartPlayingFileLocally, gips_channel_, stream_.get());
LOG(LS_ERROR) << "Unable to start soundclip";
return false;
}
} else {
stream_.reset();
}
return true;
}
int GetLastGipsError() { return engine_->gips_sc()->error(); }
private:
GipsVoiceEngine *engine_;
int gips_channel_;
talk_base::scoped_ptr<GipsSoundclipStream> stream_;
};
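// The default constructor creates two GIPS VoiceEngine wrappers: gips_ for
// calls and gips_sc_ for soundclip playout. The second constructor takes the
// wrappers as arguments, which makes it possible to inject alternative
// implementations (e.g. fakes for testing).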
GipsVoiceEngine::GipsVoiceEngine()
: gips_(new GipsWrapper()),
gips_sc_(new GipsWrapper()),
tracing_(new GipsTraceWrapper()),
log_level_(kDefaultLogSeverity),
is_dumping_aec_(false),
desired_local_monitor_enable_(false) {
Construct();
}
GipsVoiceEngine::GipsVoiceEngine(
GipsWrapper* gips, GipsWrapper* gips_sc, GipsTraceWrapper* tracing)
: gips_(gips),
gips_sc_(gips_sc),
tracing_(tracing),
log_level_(kDefaultLogSeverity),
is_dumping_aec_(false),
desired_local_monitor_enable_(false) {
Construct();
}
void GipsVoiceEngine::Construct() {
initialized_ = false;
LOG(LS_VERBOSE) << "GipsVoiceEngine::GipsVoiceEngine";
ApplyLogging("");
if (tracing_->SetTraceCallback(this) == -1) {
LOG_GIPSERR0(SetTraceCallback);
}
if (gips_->base()->GIPSVE_RegisterVoiceEngineObserver(*this) == -1) {
LOG_GIPSERR0(RegisterVoiceEngineObserver);
}
// Clear the default agc state.
memset(&default_agc_config_, 0, sizeof(default_agc_config_));
// Load our audio codec list
LOG(LS_INFO) << "GIPS VoiceEngine codecs:";
int ncodecs = gips_->codec()->GIPSVE_NumOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
GIPS_CodecInst gcodec;
if (gips_->codec()->GIPSVE_GetCodec(i, gcodec) >= 0) {
int pref = GetCodecPreference(gcodec.plname, gcodec.plfreq);
if (pref != -1) {
if (gcodec.rate == -1) gcodec.rate = 0;
AudioCodec codec(gcodec.pltype, gcodec.plname, gcodec.plfreq,
gcodec.rate, gcodec.channels, pref);
LOG(LS_INFO) << gcodec.plname << "/" << gcodec.plfreq << "/"
<< gcodec.channels << " " << gcodec.pltype;
codecs_.push_back(codec);
}
}
}
// Make sure they are in local preference order
std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
}
GipsVoiceEngine::~GipsVoiceEngine() {
LOG(LS_VERBOSE) << "GipsVoiceEngine::~GipsVoiceEngine";
if (gips_->base()->GIPSVE_DeRegisterVoiceEngineObserver() == -1) {
LOG_GIPSERR0(DeRegisterVoiceEngineObserver);
}
tracing_->SetTraceCallback(NULL);
}
bool GipsVoiceEngine::Init() {
LOG(LS_INFO) << "GipsVoiceEngine::Init";
bool res = InitInternal();
if (res) {
LOG(LS_INFO) << "GipsVoiceEngine::Init Done!";
} else {
LOG(LS_ERROR) << "GipsVoiceEngine::Init failed";
Terminate();
}
return res;
}
bool GipsVoiceEngine::InitInternal() {
// Temporarily turn logging level up for the GIPSVE_Init call
int old_level = log_level_;
log_level_ = talk_base::_min(log_level_,
static_cast<int>(talk_base::LS_INFO));
ApplyLogging("");
#if defined(LINUX) && !defined(HAVE_LIBPULSE)
gips_->hw()->GIPSVE_SetAudioDeviceLayer(GIPS_AUDIO_LINUX_ALSA);
#endif
#ifdef WIN32
// NOTE(fbarchard): Enable this to test WAVE on Windows 7
// gips_->hw()->GIPSVE_SetAudioDeviceLayer(GIPS_AUDIO_WINDOWS_WAVE);
#endif
// shhhhh{
#ifdef ANDROID
const char AUTH_KEY[] =
"====YUtFWRAAAAADBtIHgAAAAAEAAAAcAAAAAQBHU0dsb2J"
"hbCBJUCBTb3VuZAACAAAADgAAAGdvb2dsZQAAAABGAAAAEkxO38dEtVtksyvvIn"
"eiDc8yJtfB038G6bZQ4zNByUrAnpd9znXNRIe9k4Tjrjn0q7ztOMs2Ge6pqMEYq"
"aAEANWCYjZ1kKXkBNJqfZq6TGn894PUJXYKirNVozxjT1E+GzB9xCEDVuTowZIJ"
"T7qFBDr2TSCBhg2rA5JTT7Y/+l01DHxOzvOtdvdQucVyp3QbNZlwdFLJEuevKre"
"ZnfH+ChYvHXrPDWyJPNvnl9382VFbk/5ZVbdoCaHi4ISDWn9C=6wIv";
if (gips_->base()->GIPSVE_Authenticate(AUTH_KEY, strlen(AUTH_KEY)) != 0) {
LOG_GIPSERR0(Authenticate);
return false;
}
#endif
// shhhhh}
// Init GIPS VoiceEngine, enabling AEC logging if specified in SetLogging.
if (gips_->base()->GIPSVE_Init() == -1) {
LOG_GIPSERR0_EX(Init, gips_->error());
return false;
}
// Restore the previous log level and apply the log filter.
log_level_ = old_level;
ApplyLogging(log_filter_);
// Log the GIPS version info
char buffer[1024] = "";
gips_->base()->GIPSVE_GetVersion(buffer);
LOG(LS_INFO) << "GIPS VoiceEngine Version:";
GipsLogMultiline(talk_base::LS_INFO, buffer);
// Turn on AEC, AGC, NS and typing detection by default.
int options = MediaEngineInterface::ECHO_CANCELLATION
| MediaEngineInterface::AUTO_GAIN_CONTROL
| MediaEngineInterface::NOISE_SUPPRESSION
#if !defined(IOS) && !defined(ANDROID)
| MediaEngineInterface::TYPING_DETECTION
#endif
;
if (!SetOptions(options)) {
return false;
}
// Save the default AGC configuration settings.
if (gips_->vqe()->GIPSVE_GetAGCConfig(default_agc_config_) == -1) {
LOG_GIPSERR0(GetAGCConfig);
return false;
}
// Print our codec list again for the call diagnostic log
LOG(LS_INFO) << "GIPS VoiceEngine codecs:";
for (std::vector<AudioCodec>::const_iterator it = codecs_.begin();
it != codecs_.end(); ++it) {
LOG(LS_INFO) << it->name << "/" << it->clockrate << "/"
<< it->channels << " " << it->id;
}
#if defined(LINUX) && !defined(HAVE_LIBPULSE)
gips_sc_->hw()->GIPSVE_SetAudioDeviceLayer(GIPS_AUDIO_LINUX_ALSA);
#endif
// shhhhh{
#ifdef ANDROID
if (gips_sc_->base()->GIPSVE_Authenticate(AUTH_KEY, strlen(AUTH_KEY)) != 0) {
LOG_GIPSERR0(Authenticate);
return false;
}
#endif
// shhhhh}
// Initialize the GIPS instance that we'll use to play out sound clips.
if (gips_sc_->base()->GIPSVE_Init() == -1) {
LOG_GIPSERR0_EX(Init, gips_sc_->error());
return false;
}
// On Windows, tell it to use the default sound (not communication) devices.
// First check whether there is a valid sound device for playback.
// TODO(juberti): Clean this up when we support setting the soundclip device.
#ifdef WIN32
int num_of_devices = 0;
if (gips_sc_->hw()->GIPSVE_GetNumOfPlayoutDevices(num_of_devices) != -1 &&
num_of_devices > 0) {
if (gips_sc_->hw()->GIPSVE_SetPlayoutDevice(kDefaultSoundclipDeviceId)
== -1) {
LOG_GIPSERR1_EX(SetPlayoutDevice, kDefaultSoundclipDeviceId,
gips_sc_->error());
return false;
}
} else {
LOG(LS_WARNING) << "No valid sound playout device found.";
}
#endif
initialized_ = true;
return true;
}
void GipsVoiceEngine::Terminate() {
LOG(LS_INFO) << "GipsVoiceEngine::Terminate";
initialized_ = false;
if (is_dumping_aec_) {
if (gips_->vqe()->GIPSVE_StopDebugRecording() == -1) {
LOG_GIPSERR0(StopDebugRecording);
}
is_dumping_aec_ = false;
}
gips_sc_->base()->GIPSVE_Terminate();
gips_->base()->GIPSVE_Terminate();
desired_local_monitor_enable_ = false;
}
int GipsVoiceEngine::GetCapabilities() {
return AUDIO_SEND | AUDIO_RECV;
}
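// Creates a voice channel for a call. Returns NULL if the underlying GIPS
// channel could not be created (the new channel reports !valid()).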
VoiceMediaChannel *GipsVoiceEngine::CreateChannel() {
GipsVoiceMediaChannel* ch = new GipsVoiceMediaChannel(this);
if (!ch->valid()) {
delete ch;
ch = NULL;
}
return ch;
}
SoundclipMedia *GipsVoiceEngine::CreateSoundclip() {
GipsSoundclipMedia *soundclip = new GipsSoundclipMedia(this);
if (!soundclip->Init() || !soundclip->Enable()) {
delete soundclip;
return NULL;
}
return soundclip;
}
bool GipsVoiceEngine::SetOptions(int options) {
// GIPS tells us that "auto" mode doesn't work too well, so we don't use it.
bool aec = (options & MediaEngineInterface::ECHO_CANCELLATION) ? true : false;
bool agc = (options & MediaEngineInterface::AUTO_GAIN_CONTROL) ? true : false;
bool ns = (options & MediaEngineInterface::NOISE_SUPPRESSION) ? true : false;
#if !defined(IOS) && !defined(ANDROID)
bool typing_detection =
(options & MediaEngineInterface::TYPING_DETECTION) ? true : false;
if (gips_->vqe()->GIPSVE_SetECStatus(aec) == -1) {
LOG_GIPSERR1(SetECStatus, aec);
return false;
}
if (gips_->vqe()->GIPSVE_SetAGCStatus(agc) == -1) {
LOG_GIPSERR1(SetAGCStatus, agc);
return false;
}
// TODO(pthatcher): The existing Talk Plugin's on-disk preference settings
// won't include NOISE_SUPPRESSION or TYPING_DETECTION, so those flags won't
// get passed in. For desktop, we hard-code NS and typing detection to
// true for now, until we can solve this.
if (gips_->vqe()->GIPSVE_SetNSStatus(true) == -1) {
LOG_GIPSERR1(SetNSStatus, ns);
return false;
}
if (gips_->vqe()->GIPSVE_SetTypingDetectionStatus(true) == -1) {
// In case of error, log the info and continue
LOG_GIPSERR1(SetTypingDetectionStatus, typing_detection);
}
#else
if (gips_->vqe()->GIPSVE_SetECStatus(aec, GIPS_EC_AECM) == -1) {
LOG_GIPSERR2(SetECStatus, aec, GIPS_EC_AECM);
return false;
}
if (aec) {
// Use speakerphone mode with comfort noise generation for mobile.
if (gips_->vqe()->GIPSVE_SetAECMMode(GIPS_AECM_SPEAKERPHONE, true) != 0) {
LOG_GIPSERR2(SetAECMMode, GIPS_AECM_SPEAKERPHONE, true);
}
}
// On mobile, GIPS recommends fixed AGC (not adaptive)
if (gips_->vqe()->GIPSVE_SetAGCStatus(agc, GIPS_AGC_FIXED_DIGITAL) == -1) {
LOG_GIPSERR2(SetAGCStatus, agc, GIPS_AGC_FIXED_DIGITAL);
return false;
}
// On mobile, GIPS recommends moderate aggressiveness.
if (gips_->vqe()->GIPSVE_SetNSStatus(ns,
GIPS_NS_MODERATE_SUPPRESSION) == -1) {
LOG_GIPSERR2(SetNSStatus, ns, GIPS_NS_MODERATE_SUPPRESSION);
return false;
}
// No typing detection support on iOS or Android.
#endif // !IOS && !ANDROID
return true;
}
struct ResumeEntry {
ResumeEntry(GipsVoiceMediaChannel *c, bool p, SendFlags s)
: channel(c),
playout(p),
send(s) {
}
GipsVoiceMediaChannel *channel;
bool playout;
SendFlags send;
};
// TODO(juberti): Refactor this so that the core logic can be used to set the
// soundclip device. At that time, reinstate the soundclip pause/resume code.
bool GipsVoiceEngine::SetDevices(const Device* in_device,
const Device* out_device) {
#if !defined(IOS) && !defined(ANDROID)
int in_id = in_device ? talk_base::FromString<int>(in_device->id) :
kDefaultAudioDeviceId;
int out_id = out_device ? talk_base::FromString<int>(out_device->id) :
kDefaultAudioDeviceId;
// The device manager uses -1 as the default device, which was the case for
// VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
#ifndef WIN32
if (-1 == in_id) {
in_id = kDefaultAudioDeviceId;
}
if (-1 == out_id) {
out_id = kDefaultAudioDeviceId;
}
#endif
std::string in_name = (in_id != kDefaultAudioDeviceId) ?
in_device->name : "Default device";
std::string out_name = (out_id != kDefaultAudioDeviceId) ?
out_device->name : "Default device";
LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
<< ") and speaker to (id=" << out_id << ", name=" << out_name
<< ")";
// If we're running the local monitor, we need to stop it first.
bool ret = true;
if (!PauseLocalMonitor()) {
LOG(LS_WARNING) << "Failed to pause local monitor";
ret = false;
}
// Must also pause all audio playback and capture.
for (ChannelList::const_iterator i = channels_.begin();
i != channels_.end(); ++i) {
GipsVoiceMediaChannel *channel = *i;
if (!channel->PausePlayout()) {
LOG(LS_WARNING) << "Failed to pause playout";
ret = false;
}
if (!channel->PauseSend()) {
LOG(LS_WARNING) << "Failed to pause send";
ret = false;
}
}
// Find the recording device id in GIPS and set recording device.
if (!FindGipsAudioDeviceId(true, in_name, in_id, &in_id)) {
ret = false;
}
if (ret) {
if (gips_->hw()->GIPSVE_SetRecordingDevice(in_id) == -1) {
LOG_GIPSERR2(SetRecordingDevice, in_name, in_id);
ret = false;
}
}
// Find the playout device id in GIPS and set playout device.
if (!FindGipsAudioDeviceId(false, out_name, out_id, &out_id)) {
LOG(LS_WARNING) << "Failed to find gips device id for " << out_name;
ret = false;
}
if (ret) {
if (gips_->hw()->GIPSVE_SetPlayoutDevice(out_id) == -1) {
LOG_GIPSERR2(SetPlayoutDevice, out_name, out_id);
ret = false;
}
}
// Resume all audio playback and capture.
for (ChannelList::const_iterator i = channels_.begin();
i != channels_.end(); ++i) {
GipsVoiceMediaChannel *channel = *i;
if (!channel->ResumePlayout()) {
LOG(LS_WARNING) << "Failed to resume playout";
ret = false;
}
if (!channel->ResumeSend()) {
LOG(LS_WARNING) << "Failed to resume send";
ret = false;
}
}
// Resume local monitor.
if (!ResumeLocalMonitor()) {
LOG(LS_WARNING) << "Failed to resume local monitor";
ret = false;
}
if (ret) {
LOG(LS_INFO) << "Set microphone to (id=" << in_id <<" name=" << in_name
<< ") and speaker to (id="<< out_id << " name=" << out_name
<< ")";
}
return ret;
#else
return true;
#endif // !IOS && !ANDROID
}
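// Maps a device-manager device (name/id) to the corresponding GIPS device
// index, storing the result in |gips_id|. Returns false if no match is found.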
bool GipsVoiceEngine::FindGipsAudioDeviceId(
bool is_input, const std::string& dev_name, int dev_id, int* gips_id) {
// In Linux, GIPS uses the same device dev_id as the device manager.
#ifdef LINUX
*gips_id = dev_id;
return true;
#else
// In Windows and Mac, we need to find the GIPS device id by name unless the
// input dev_id is the default device id.
if (kDefaultAudioDeviceId == dev_id) {
*gips_id = dev_id;
return true;
}
// Get the number of GIPS audio devices.
int count = 0;
if (is_input) {
if (-1 == gips_->hw()->GIPSVE_GetNumOfRecordingDevices(count)) {
LOG_GIPSERR0(GetNumOfRecordingDevices);
return false;
}
} else {
if (-1 == gips_->hw()->GIPSVE_GetNumOfPlayoutDevices(count)) {
LOG_GIPSERR0(GetNumOfPlayoutDevices);
return false;
}
}
for (int i = 0; i < count; ++i) {
char name[128];
char guid[128];
if (is_input) {
gips_->hw()->GIPSVE_GetRecordingDeviceName(i, name, guid);
LOG(LS_VERBOSE) << "GIPS microphone " << i << ": " << name;
} else {
gips_->hw()->GIPSVE_GetPlayoutDeviceName(i, name, guid);
LOG(LS_VERBOSE) << "GIPS speaker " << i << ": " << name;
}
std::string gips_name(name);
if (dev_name.compare(0, gips_name.size(), gips_name) == 0) {
*gips_id = i;
return true;
}
}
LOG(LS_WARNING) << "GIPS cannot find device: " << dev_name;
return false;
#endif
}
bool GipsVoiceEngine::GetOutputVolume(int* level) {
unsigned int ulevel;
if (gips_->volume()->GIPSVE_GetSpeakerVolume(ulevel) == -1) {
LOG_GIPSERR1(GetSpeakerVolume, level);
return false;
}
*level = ulevel;
return true;
}
bool GipsVoiceEngine::SetOutputVolume(int level) {
ASSERT(level >= 0 && level <= 255);
if (gips_->volume()->GIPSVE_SetSpeakerVolume(level) == -1) {
LOG_GIPSERR1(SetSpeakerVolume, level);
return false;
}
return true;
}
int GipsVoiceEngine::GetInputLevel() {
unsigned int ulevel;
return (gips_->volume()->GIPSVE_GetSpeechInputLevel(ulevel) != -1) ?
static_cast<int>(ulevel) : -1;
}
bool GipsVoiceEngine::SetLocalMonitor(bool enable) {
desired_local_monitor_enable_ = enable;
return ChangeLocalMonitor(desired_local_monitor_enable_);
}
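// Starts or stops recording the microphone into monitor_, the stream that
// backs the local monitor. A failed start is followed by an explicit stop so
// GIPS and our bookkeeping stay in sync.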
bool GipsVoiceEngine::ChangeLocalMonitor(bool enable) {
if (enable && !monitor_.get()) {
monitor_.reset(new GipsMonitorStream);
if (gips_->file()->GIPSVE_StartRecordingMicrophone(monitor_.get()) == -1) {
LOG_GIPSERR1(StartRecordingMicrophone, monitor_.get());
// Must call Stop() because there are some cases where Start will report
// failure but still change the state, and if we leave VE in the on state
// then it could crash later when trying to invoke methods on our monitor.
gips_->file()->GIPSVE_StopRecordingMicrophone();
monitor_.reset();
return false;
}
} else if (!enable && monitor_.get()) {
gips_->file()->GIPSVE_StopRecordingMicrophone();
monitor_.reset();
}
return true;
}
bool GipsVoiceEngine::PauseLocalMonitor() {
return ChangeLocalMonitor(false);
}
bool GipsVoiceEngine::ResumeLocalMonitor() {
return ChangeLocalMonitor(desired_local_monitor_enable_);
}
const std::vector<AudioCodec>& GipsVoiceEngine::codecs() {
return codecs_;
}
bool GipsVoiceEngine::FindCodec(const AudioCodec& in) {
return FindGIPSCodec(in, NULL);
}
bool GipsVoiceEngine::FindGIPSCodec(const AudioCodec& in,
GIPS_CodecInst* out) {
int ncodecs = gips_->codec()->GIPSVE_NumOfCodecs();
for (int i = 0; i < ncodecs; ++i) {
GIPS_CodecInst gcodec;
if (gips_->codec()->GIPSVE_GetCodec(i, gcodec) >= 0) {
AudioCodec codec(gcodec.pltype, gcodec.plname,
gcodec.plfreq, gcodec.rate, gcodec.channels, 0);
if (codec.Matches(in)) {
if (out) {
// If the codec is VBR and an explicit rate is specified, use it.
if (in.bitrate != 0 && gcodec.rate == -1) {
gcodec.rate = in.bitrate;
}
*out = gcodec;
}
return true;
}
}
}
return false;
}
void GipsVoiceEngine::SetLogging(int min_sev, const char* filter) {
// if min_sev == -1, we keep the current log level.
if (min_sev >= 0) {
log_level_ = min_sev;
}
log_filter_ = filter;
ApplyLogging(initialized_ ? log_filter_ : "");
}
int GipsVoiceEngine::GetLastGipsError() {
return gips_->error();
}
// We support three different logging settings for GIPS:
// 1. Observer callback that goes into talk diagnostic logfile.
// Use --logfile and --loglevel
//
// 2. Encrypted GIPS log for debugging VoiceEngine.
// Use --voice_loglevel --voice_logfilter "tracefile file_name"
//
// 3. EC log and dump for debugging QualityEngine.
// Use --voice_loglevel --voice_logfilter "recordEC file_name"
//
// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
// Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
void GipsVoiceEngine::ApplyLogging(const std::string& log_filter) {
// Set log level.
int filter = 0;
switch (log_level_) {
case talk_base::LS_VERBOSE:
filter |= GIPS::TR_ALL; // fall through
case talk_base::LS_INFO:
filter |= GIPS::TR_STATE_INFO; // fall through
case talk_base::LS_WARNING:
filter |= (GIPS::TR_INFO | GIPS::TR_WARNING); // fall through
case talk_base::LS_ERROR:
filter |= (GIPS::TR_ERROR | GIPS::TR_CRITICAL);
}
tracing_->SetTraceFilter(filter);
// Set encrypted trace file.
std::vector<std::string> opts;
talk_base::tokenize(log_filter, ' ', '"', '"', &opts);
std::vector<std::string>::iterator tracefile =
std::find(opts.begin(), opts.end(), "tracefile");
if (tracefile != opts.end() && ++tracefile != opts.end()) {
// Write encrypted debug output (at same loglevel) to file
if (tracing_->SetEncryptedTraceFile(tracefile->c_str()) == -1) {
LOG_GIPSERR1(SetEncryptedTraceFileName, *tracefile);
}
}
// Set AEC dump file
std::vector<std::string>::iterator recordEC =
std::find(opts.begin(), opts.end(), "recordEC");
if (recordEC != opts.end()) {
++recordEC;
if (recordEC != opts.end() && !is_dumping_aec_) {
// Start dumping AEC when we are not dumping and recordEC has a filename.
if (gips_->vqe()->GIPSVE_StartDebugRecording(recordEC->c_str()) == -1) {
LOG_GIPSERR0(StartDebugRecording);
} else {
is_dumping_aec_ = true;
}
} else if (recordEC == opts.end() && is_dumping_aec_) {
// Stop dumping EC when we are dumping and recordEC has no filename.
if (gips_->vqe()->GIPSVE_StopDebugRecording() == -1) {
LOG_GIPSERR0(StopDebugRecording);
}
is_dumping_aec_ = false;
}
}
}
// Ignore spammy trace messages, mostly from the stats API when we haven't
// gotten RTCP info yet from the remote side.
static bool ShouldIgnoreTrace(const std::string& trace) {
static const char* kTracesToIgnore[] = {
"\tfailed to GetReportBlockInformation",
"GetRecCodec() failed to get received codec",
"GetRemoteRTCPData() failed to retrieve sender info for remote side",
"GetRTPStatistics() failed to measure RTT since no RTP packets have been received yet", // NOLINT
"GetRTPStatistics() failed to read RTP statistics from the RTP/RTCP module",
"GetRTPStatistics() failed to retrieve RTT from the RTP/RTCP module",
"RTCPReceiver::SenderInfoReceived No received SR",
"StatisticsRTP() no statisitics availble",
NULL
};
for (const char* const* p = kTracesToIgnore; *p; ++p) {
if (trace.find(*p) == 0) {
return true;
}
}
return false;
}
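// Trace callback from GIPS. Maps GIPS trace levels to our logging
// severities, strips the fixed-width GIPS prefix from each message, and
// drops known-spammy traces via ShouldIgnoreTrace().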
void GipsVoiceEngine::Print(const GIPS::TraceLevel level,
const char* trace, const int length) {
talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
if (level == GIPS::TR_ERROR || level == GIPS::TR_CRITICAL)
sev = talk_base::LS_ERROR;
else if (level == GIPS::TR_WARNING)
sev = talk_base::LS_WARNING;
else if (level == GIPS::TR_STATE_INFO || level == GIPS::TR_INFO)
sev = talk_base::LS_INFO;
if (sev >= log_level_) {
// Skip past gips boilerplate prefix text
if (length < 72) {
std::string msg(trace, length);
LOG(LS_ERROR) << "Malformed GIPS log message: ";
LOG_V(sev) << msg;
} else {
std::string msg(trace + 71, length - 72);
if (!ShouldIgnoreTrace(msg)) {
LOG_V(sev) << "GIPS_VE: " << msg;
}
}
}
}
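// VoiceEngine observer callback, invoked when GIPS reports an error on a
// channel. Finds the owning GipsVoiceMediaChannel and forwards the error
// along with the corresponding SSRC.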
void GipsVoiceEngine::CallbackOnError(const int channel_num,
const int err_code) {
talk_base::CritScope lock(&channels_cs_);
GipsVoiceMediaChannel* channel = NULL;
uint32 ssrc = 0;
LOG(LS_WARNING) << "GIPS error " << err_code << " reported on channel "
<< channel_num << ".";
if (FindChannelAndSsrc(channel_num, &channel, &ssrc)) {
ASSERT(channel != NULL);
channel->OnError(ssrc, err_code);
} else {
LOG(LS_ERROR) << "GIPS channel " << channel_num
<< " could not be found in the channel list when error reported.";
}
}
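// Returns the preference value for the given codec name and clock rate:
// entries nearer the top of kCodecPrefs map to larger values
// (ARRAY_SIZE(kCodecPrefs) - index), and unknown codecs return -1.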
int GipsVoiceEngine::GetCodecPreference(const char *name, int clockrate) {
for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
if ((_stricmp(kCodecPrefs[i].name, name) == 0) &&
(kCodecPrefs[i].clockrate == clockrate))
return ARRAY_SIZE(kCodecPrefs) - i;
}
LOG(LS_WARNING) << "Unexpected codec \"" << name << "/" << clockrate << "\"";
return -1;
}
bool GipsVoiceEngine::FindChannelAndSsrc(
int channel_num, GipsVoiceMediaChannel** channel, uint32* ssrc) const {
ASSERT(channel != NULL && ssrc != NULL);
*channel = NULL;
*ssrc = 0;
// Find corresponding channel and ssrc
for (ChannelList::const_iterator it = channels_.begin();
it != channels_.end(); ++it) {
ASSERT(*it != NULL);
if ((*it)->FindSsrc(channel_num, ssrc)) {
*channel = *it;
return true;
}
}
return false;
}
void GipsVoiceEngine::RegisterChannel(GipsVoiceMediaChannel *channel) {
talk_base::CritScope lock(&channels_cs_);
channels_.push_back(channel);
}
void GipsVoiceEngine::UnregisterChannel(GipsVoiceMediaChannel *channel) {
talk_base::CritScope lock(&channels_cs_);
ChannelList::iterator i = std::find(channels_.begin(),
channels_.end(),
channel);
if (i != channels_.end()) {
channels_.erase(i);
}
}
void GipsVoiceEngine::RegisterSoundclip(GipsSoundclipMedia *soundclip) {
soundclips_.push_back(soundclip);
}
void GipsVoiceEngine::UnregisterSoundclip(GipsSoundclipMedia *soundclip) {
SoundclipList::iterator i = std::find(soundclips_.begin(),
soundclips_.end(),
soundclip);
if (i != soundclips_.end()) {
soundclips_.erase(i);
}
}
// Adjusts the default AGC target level by the specified delta.
// NB: If we start messing with other config fields, we'll want
// to save the current GIPS_AGC_config as well.
bool GipsVoiceEngine::AdjustAgcLevel(int delta) {
GIPS_AGC_config config = default_agc_config_;
config.targetLeveldBOv += delta;
LOG(LS_INFO) << "Adjusting AGC level from default -"
<< default_agc_config_.targetLeveldBOv << "dB to -"
<< config.targetLeveldBOv << "dB";
if (gips_->vqe()->GIPSVE_SetAGCConfig(config) == -1) {
LOG_GIPSERR1(SetAGCConfig, config.targetLeveldBOv);
return false;
}
return true;
}
// Configures echo cancellation and noise suppression modes according to
// whether or not we are in a multi-point conference.
bool GipsVoiceEngine::SetConferenceMode(bool enable) {
// Only use EC_AECM for mobile.
#if defined(IOS) || defined(ANDROID)
return true;
#endif
LOG(LS_INFO) << (enable ? "Enabling" : "Disabling")
<< " Conference Mode noise reduction";
// We always configure noise suppression on, so just toggle the mode.
const GIPS_NSModes ns_mode = enable ? GIPS_NS_CONFERENCE : GIPS_NS_DEFAULT;
if (gips_->vqe()->GIPSVE_SetNSStatus(true, ns_mode) == -1) {
LOG_GIPSERR2(SetNSStatus, true, ns_mode);
return false;
}
// Echo-cancellation is a user-option, so preserve the enable state and
// just toggle the mode.
bool aec;
GIPS_ECModes ec_mode;
GIPS_AESModes aes_mode;
int aes_attn;
if (gips_->vqe()->GIPSVE_GetECStatus(aec, ec_mode,
aes_mode, aes_attn) == -1) {
LOG_GIPSERR0(GetECStatus);
return false;
}
ec_mode = enable ? GIPS_EC_CONFERENCE : GIPS_EC_DEFAULT;
if (gips_->vqe()->GIPSVE_SetECStatus(aec, ec_mode) == -1) {
LOG_GIPSERR2(SetECStatus, aec, ec_mode);
return false;
}
return true;
}
// GipsVoiceMediaChannel
GipsVoiceMediaChannel::GipsVoiceMediaChannel(GipsVoiceEngine *engine)
: GipsMediaChannel<VoiceMediaChannel, GipsVoiceEngine>(
engine,
engine->gips()->base()->GIPSVE_CreateChannel()),
channel_options_(0),
agc_adjusted_(false),
dtmf_allowed_(false),
desired_playout_(false),
playout_(false),
desired_send_(SEND_NOTHING),
send_(SEND_NOTHING) {
engine->RegisterChannel(this);
LOG(LS_VERBOSE) << "GipsVoiceMediaChannel::GipsVoiceMediaChannel "
<< gips_channel();
// Register external transport
if (engine->gips()->network()->GIPSVE_RegisterExternalTransport(
gips_channel(), *static_cast<GIPS_transport*>(this)) == -1) {
LOG_GIPSERR2(RegisterExternalTransport, gips_channel(), this);
}
// Enable RTCP (for quality stats and feedback messages)
EnableRtcp(gips_channel());
// Create a random but nonzero send SSRC
SetSendSsrc(talk_base::CreateRandomNonZeroId());
#if defined(IOS) || defined(ANDROID)
// Turn on and configure receiving-end auto gain control
if (engine->gips()->vqe()->GIPSVE_SetRxAGCStatus(
gips_channel(), true, GIPS_AGC_FIXED_DIGITAL) != 0) {
LOG(LS_ERROR) << "Failed to set Rx AGC status";
}
// These settings were found to work well on mobile.
GIPS_AGC_config config;
config.targetLeveldBOv = 6;
config.digitalCompressionGaindB = 0;
config.limiterEnable = true;
if (engine->gips()->vqe()->GIPSVE_SetRxAGCConfig(gips_channel(),
config) != 0) {
LOG(LS_ERROR) << "Failed to set Rx AGC config for channel "
<< gips_channel();
}
#endif // IOS || ANDROID
}
GipsVoiceMediaChannel::~GipsVoiceMediaChannel() {
LOG(LS_VERBOSE) << "GipsVoiceMediaChannel::~GipsVoiceMediaChannel "
<< gips_channel();
// DeRegister external transport
if (engine()->gips()->network()->GIPSVE_DeRegisterExternalTransport(
gips_channel()) == -1) {
LOG_GIPSERR1(DeRegisterExternalTransport, gips_channel());
}
// Unregister ourselves from the engine.
engine()->UnregisterChannel(this);
// Remove any remaining streams.
while (!mux_channels_.empty()) {
RemoveStream(mux_channels_.begin()->first);
}
// Delete the primary channel.
if (engine()->gips()->base()->GIPSVE_DeleteChannel(gips_channel()) == -1) {
LOG_GIPSERR1(DeleteChannel, gips_channel());
}
}
bool GipsVoiceMediaChannel::SetOptions(int flags) {
// Always accept flags that are unchanged.
if (channel_options_ == flags) {
return true;
}
// Reject new options if we're already sending.
if (send_ != SEND_NOTHING) {
return false;
}
// Save the options, to be interpreted where appropriate.
channel_options_ = flags;
return true;
}
bool GipsVoiceMediaChannel::SetRecvCodecs(
const std::vector<AudioCodec>& codecs) {
// Update our receive payload types to match what we offered. This is only
// an issue when a different entity (e.g. a server) is generating the offer
// for us.
bool ret = true;
for (std::vector<AudioCodec>::const_iterator i = codecs.begin();
i != codecs.end() && ret; ++i) {
GIPS_CodecInst gcodec;
if (engine()->FindGIPSCodec(*i, &gcodec)) {
if (gcodec.pltype != i->id) {
LOG(LS_INFO) << "Updating payload type for " << gcodec.plname
<< " from " << gcodec.pltype << " to " << i->id;
gcodec.pltype = i->id;
if (engine()->gips()->codec()->GIPSVE_SetRecPayloadType(
gips_channel(), gcodec) == -1) {
LOG_GIPSERR1(SetRecPayloadType, gips_channel());
ret = false;
}
}
} else {
LOG(LS_WARNING) << "Unknown codec " << i->name;
ret = false;
}
}
return ret;
}
bool GipsVoiceMediaChannel::SetSendCodecs(
const std::vector<AudioCodec>& codecs) {
// Disable DTMF, VAD, and FEC unless we know the other side wants them.
dtmf_allowed_ = false;
engine()->gips()->codec()->GIPSVE_SetVADStatus(gips_channel(), false);
engine()->gips()->rtp()->GIPSVE_SetFECStatus(gips_channel(), false);
// Scan through the list to figure out the codec to use for sending, along
// with the proper configuration for VAD and DTMF.
bool first = true;
GIPS_CodecInst send_codec;
memset(&send_codec, 0, sizeof(send_codec));
for (std::vector<AudioCodec>::const_iterator i = codecs.begin();
i != codecs.end(); ++i) {
// Ignore codecs we don't know about. The negotiation step should prevent
// this, but double-check to be sure.
GIPS_CodecInst gcodec;
if (!engine()->FindGIPSCodec(*i, &gcodec)) {
LOG(LS_WARNING) << "Unknown codec " << i->name;
continue;
}
// Find the DTMF telephone event "codec" and tell gips about it.
if (_stricmp(i->name.c_str(), "telephone-event") == 0 ||
_stricmp(i->name.c_str(), "audio/telephone-event") == 0) {
engine()->gips()->dtmf()->GIPSVE_SetSendTelephoneEventPayloadType(
gips_channel(), i->id);
dtmf_allowed_ = true;
}
// Turn voice activity detection/comfort noise on if supported.
// Set the wideband CN payload type appropriately
// (narrowband always uses the static payload type 13).
if (_stricmp(i->name.c_str(), "CN") == 0) {
GIPS_PayloadFrequencies cn_freq;
switch (i->clockrate) {
case 8000:
cn_freq = GIPS_FREQ_8000_HZ;
break;
case 16000:
cn_freq = GIPS_FREQ_16000_HZ;
break;
case 32000:
cn_freq = GIPS_FREQ_32000_HZ;
break;
default:
LOG(LS_WARNING) << "CN frequency " << i->clockrate
<< " not supported.";
continue;
}
engine()->gips()->codec()->GIPSVE_SetVADStatus(gips_channel(), true);
if (cn_freq != GIPS_FREQ_8000_HZ) {
engine()->gips()->codec()->GIPSVE_SetSendCNPayloadType(gips_channel(),
i->id, cn_freq);
}
}
// We'll use the first codec in the list to actually send audio data.
// Be sure to use the payload type requested by the remote side.
// "red", for FEC audio, is a special case where the actual codec to be
// used is specified in params.
if (first) {
if (_stricmp(i->name.c_str(), "red") == 0) {
// Parse out the RED parameters. If we fail, just ignore RED;
// we don't support all possible params/usage scenarios.
if (!GetRedSendCodec(*i, codecs, &send_codec)) {
continue;
}
// Enable redundant encoding of the specified codec. Treat any
// failure as a fatal internal error.
LOG(LS_INFO) << "Enabling RED";
if (engine()->gips()->rtp()->GIPSVE_SetFECStatus(gips_channel(),
true, i->id) == -1) {
LOG_GIPSERR3(SetFECStatus, gips_channel(), true, i->id);
return false;
}
} else {
send_codec = gcodec;
send_codec.pltype = i->id;
}
first = false;
}
}
// If we're being asked to set an empty list of codecs, due to a buggy client,
// choose the most common format: PCMU
if (first) {
LOG(LS_WARNING) << "Received empty list of codecs; using PCMU/8000";
AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
engine()->FindGIPSCodec(codec, &send_codec);
}
// Set the codec.
LOG(LS_INFO) << "Selected voice codec " << send_codec.plname
<< "/" << send_codec.plfreq;
if (engine()->gips()->codec()->GIPSVE_SetSendCodec(gips_channel(),
send_codec) == -1) {
LOG_GIPSERR1(SetSendCodec, gips_channel());
return false;
}
return true;
}
bool GipsVoiceMediaChannel::SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
// We don't support any incoming RTP header extensions right now.
return true;
}
bool GipsVoiceMediaChannel::SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
// Enable the audio level extension header if requested.
std::vector<RtpHeaderExtension>::const_iterator it;
for (it = extensions.begin(); it != extensions.end(); ++it) {
if (it->uri == kRtpAudioLevelHeaderExtension) {
break;
}
}
bool enable = (it != extensions.end());
int id = 0;
if (enable) {
id = it->id;
if (id < kMinRtpHeaderExtensionId ||
id > kMaxRtpHeaderExtensionId) {
LOG(LS_WARNING) << "Invalid RTP header extension id " << id;
return false;
}
}
// This API call is not currently available in the iOS version of GIPS.
#if !defined(IOS) && !defined(ANDROID)
if (engine()->gips()->rtp()->GIPSVE_SetRTPAudioLevelIndicationStatus(
gips_channel(), enable, id) == -1) {
LOG_GIPSERR3(SetRTPAudioLevelIndicationStatus, gips_channel(), enable, id);
return false;
}
#endif
return true;
}
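// Playout and send are tracked as both a desired state and an actual state,
// so that the engine can pause them (e.g. while switching audio devices) and
// later resume whatever the caller last requested.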
bool GipsVoiceMediaChannel::SetPlayout(bool playout) {
desired_playout_ = playout;
return ChangePlayout(desired_playout_);
}
bool GipsVoiceMediaChannel::PausePlayout() {
return ChangePlayout(false);
}
bool GipsVoiceMediaChannel::ResumePlayout() {
return ChangePlayout(desired_playout_);
}
bool GipsVoiceMediaChannel::ChangePlayout(bool playout) {
if (playout_ == playout) {
return true;
}
bool result = true;
if (mux_channels_.empty()) {
// Only toggle the default channel if we don't have any other channels.
result = SetPlayout(gips_channel(), playout);
}
for (ChannelMap::iterator it = mux_channels_.begin();
it != mux_channels_.end() && result; ++it) {
if (!SetPlayout(it->second, playout)) {
LOG(LS_ERROR) << "SetPlayout " << playout << " on channel " << it->second
<< " failed";
result = false;
}
}
if (result) {
playout_ = playout;
}
return result;
}
bool GipsVoiceMediaChannel::SetSend(SendFlags send) {
desired_send_ = send;
return ChangeSend(desired_send_);
}
bool GipsVoiceMediaChannel::PauseSend() {
return ChangeSend(SEND_NOTHING);
}
bool GipsVoiceMediaChannel::ResumeSend() {
return ChangeSend(desired_send_);
}
bool GipsVoiceMediaChannel::ChangeSend(SendFlags send) {
if (send_ == send) {
return true;
}
if (send == SEND_MICROPHONE) {
#ifdef CHROMEOS
// Conference mode doesn't work well on ChromeOS.
if (!engine()->SetConferenceMode(false)) {
LOG_GIPSERR1(SetConferenceMode, gips_channel());
return false;
}
#else
// Multi-point conferences use conference-mode noise filtering.
if (!engine()->SetConferenceMode(
0 != (channel_options_ & OPT_CONFERENCE))) {
LOG_GIPSERR1(SetConferenceMode, gips_channel());
return false;
}
#endif // CHROMEOS
// Tandberg-bridged conferences have an AGC target that is lower than
// GTV-only levels.
/* :BRUNO: disable this since this is built for tandberg */
#ifndef BRUNO
if ((channel_options_ & OPT_AGC_TANDBERG_LEVELS) && !agc_adjusted_) {
if (engine()->AdjustAgcLevel(kTandbergDbAdjustment)) {
agc_adjusted_ = true;
}
}
#endif
// GIPS resets the sequence number when StopSend is called. This
// sometimes causes libSRTP to complain about packets being
// replayed. To get around this, we store the last sent sequence
// number and initialize the channel with the next one so it
// continues on the same sequence.
if (sequence_number() != -1) {
LOG(LS_INFO) << "GipsVoiceMediaChannel restores seqnum="
<< sequence_number() + 1;
if (engine()->gips()->sync()->GIPSVE_SetInitSequenceNumber(
gips_channel(), sequence_number() + 1) == -1) {
LOG_GIPSERR2(SetInitSequenceNumber, gips_channel(),
sequence_number() + 1);
}
}
if (engine()->gips()->base()->GIPSVE_StartSend(gips_channel()) == -1) {
LOG_GIPSERR1(StartSend, gips_channel());
return false;
}
if (engine()->gips()->file()->GIPSVE_StopPlayingFileAsMicrophone(
gips_channel()) == -1) {
LOG_GIPSERR1(StopPlayingFileAsMicrophone, gips_channel());
return false;
}
} else if (send == SEND_RINGBACKTONE) {
ASSERT(ringback_tone_.get() != NULL);
if (!ringback_tone_.get()) {
return false;
}
if (engine()->gips()->file()->GIPSVE_StartPlayingFileAsMicrophone(
gips_channel(), ringback_tone_.get(), false) == -1) {
LOG_GIPSERR3(StartPlayingFileAsMicrophone, gips_channel(),
ringback_tone_.get(), false);
return false;
}
// GIPS resets the sequence number when StopSend is called. This
// sometimes causes libSRTP to complain about packets being
// replayed. To get around this, we store the last sent sequence
// number and initialize the channel with the next one so it
// continues on the same sequence.
if (sequence_number() != -1) {
LOG(LS_INFO) << "GipsVoiceMediaChannel restores seqnum="
<< sequence_number() + 1;
if (engine()->gips()->sync()->GIPSVE_SetInitSequenceNumber(
gips_channel(), sequence_number() + 1) == -1) {
LOG_GIPSERR2(SetInitSequenceNumber, gips_channel(),
sequence_number() + 1);
}
}
if (engine()->gips()->base()->GIPSVE_StartSend(gips_channel()) == -1) {
LOG_GIPSERR1(StartSend, gips_channel());
return false;
}
} else { // SEND_NOTHING
if (engine()->gips()->base()->GIPSVE_StopSend(gips_channel()) == -1) {
LOG_GIPSERR1(StopSend, gips_channel());
}
// Reset the AGC level, if it was set.
if (agc_adjusted_) {
if (engine()->AdjustAgcLevel(0)) {
agc_adjusted_ = false;
}
}
// Disable conference-mode noise filtering.
if (!engine()->SetConferenceMode(false)) {
LOG_GIPSERR1(SetConferenceMode, gips_channel());
}
}
send_ = send;
return true;
}
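// Adds a receive stream for |ssrc|: creates a dedicated GIPS channel,
// registers our external transport on it, and copies the default channel's
// local SSRC to it so RTCP reports stay consistent.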
bool GipsVoiceMediaChannel::AddStream(uint32 ssrc) {
talk_base::CritScope lock(&mux_channels_cs_);
if (mux_channels_.find(ssrc) != mux_channels_.end()) {
return false;
}
// Create a new channel for receiving audio data.
int channel = engine()->gips()->base()->GIPSVE_CreateChannel();
if (channel == -1) {
LOG_GIPSERR0(CreateChannel);
return false;
}
// Configure to use external transport, like our default channel.
if (engine()->gips()->network()->GIPSVE_RegisterExternalTransport(
channel, *this) == -1) {
LOG_GIPSERR2(SetExternalTransport, channel, this);
return false;
}
// Use the same SSRC as our default channel (so the RTCP reports are correct).
unsigned int send_ssrc;
GIPSVERTP_RTCP* rtp = engine()->gips()->rtp();
if (rtp->GIPSVE_GetLocalSSRC(gips_channel(), send_ssrc) == -1) {
LOG_GIPSERR2(GetSendSSRC, channel, send_ssrc);
return false;
}
if (rtp->GIPSVE_SetLocalSSRC(channel, send_ssrc) == -1) {
LOG_GIPSERR2(SetSendSSRC, channel, send_ssrc);
return false;
}
if (mux_channels_.empty() && playout_) {
// This is the first stream in a multi-user meeting, so we can now
// disable playback of the default stream, since the default stream
// will probably have received some initial packets before the new
// stream was added. Otherwise the CN state from the default channel
// would be mixed in with the other streams throughout the whole
// meeting, which might be disturbing.
LOG(LS_INFO) << "Disabling playback on the default voice channel";
SetPlayout(gips_channel(), false);
}
mux_channels_[ssrc] = channel;
// TODO(juberti): We should rollback the add if SetPlayout fails.
LOG(LS_INFO) << "New audio stream " << ssrc << " registered to channel "
<< channel << ".";
return SetPlayout(channel, playout_);
}
bool GipsVoiceMediaChannel::RemoveStream(uint32 ssrc) {
talk_base::CritScope lock(&mux_channels_cs_);
ChannelMap::iterator it = mux_channels_.find(ssrc);
if (it != mux_channels_.end()) {
if (engine()->gips()->network()->GIPSVE_DeRegisterExternalTransport(
it->second) == -1) {
LOG_GIPSERR1(DeRegisterExternalTransport, it->second);
}
LOG(LS_INFO) << "Removing audio stream " << ssrc << " with channel "
<< it->second << ".";
if (engine()->gips()->base()->GIPSVE_DeleteChannel(it->second) == -1) {
LOG_GIPSERR1(DeleteChannel, gips_channel());
return false;
}
mux_channels_.erase(it);
if (mux_channels_.empty() && playout_) {
// The last stream was removed. We can now enable the default
// channel for new channels to be played out immediately without
// waiting for AddStream messages.
// TODO(oja): Does the default channel still have its CN state?
LOG(LS_INFO) << "Enabling playback on the default voice channel";
SetPlayout(gips_channel(), true);
}
}
return true;
}
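// Fills |actives| with the (ssrc, level) pairs of all receive streams whose
// current output level is greater than zero.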
bool GipsVoiceMediaChannel::GetActiveStreams(AudioInfo::StreamList* actives) {
actives->clear();
for (ChannelMap::iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
int level = GetOutputLevel(it->second);
if (level > 0) {
actives->push_back(std::make_pair(it->first, level));
}
}
return true;
}
int GipsVoiceMediaChannel::GetOutputLevel() {
// return the highest output level of all streams
int highest = GetOutputLevel(gips_channel());
for (ChannelMap::iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
int level = GetOutputLevel(it->second);
highest = talk_base::_max(level, highest);
}
return highest;
}
bool GipsVoiceMediaChannel::SetOutputScaling(
uint32 ssrc, double left, double right) {
talk_base::CritScope lock(&mux_channels_cs_);
// Collect the channels to scale the output volume.
std::vector<int> channels;
if (0 == ssrc) { // Collect all channels, including the default one.
channels.push_back(gips_channel());
for (ChannelMap::const_iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
channels.push_back(it->second);
}
} else { // Collect only the channel of the specified ssrc.
int channel = GetChannel(ssrc);
if (-1 == channel) {
LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
return false;
}
channels.push_back(channel);
}
// Scale the output volume for the collected channels. We first set the
// scaling and then set the pan.
float scale = static_cast<float>(talk_base::_max(left, right));
if (scale > 0.0001f) {
left /= scale;
right /= scale;
}
for (std::vector<int>::const_iterator it = channels.begin();
it != channels.end(); ++it) {
if (-1 == engine()->gips()->volume()->GIPSVE_SetChannelOutputVolumeScaling(
*it, scale)) {
LOG_GIPSERR2(SetChannelOutputVolumeScaling, *it, scale);
return false;
}
if (-1 == engine()->gips()->volume()->GIPSVE_SetOutputVolumePan(
*it, static_cast<float>(left), static_cast<float>(right))) {
LOG_GIPSERR3(SetOutputVolumePan, *it, left, right);
// Do not return if this fails; SetOutputVolumePan is not available on all
// platforms.
}
LOG(LS_INFO) << "SetOutputScaling to left=" << left * scale
<< " right=" << right * scale
<< " for channel " << *it << " and ssrc " << ssrc;
}
return true;
}
bool GipsVoiceMediaChannel::GetOutputScaling(
uint32 ssrc, double* left, double* right) {
if (!left || !right) return false;
talk_base::CritScope lock(&mux_channels_cs_);
// Determine which channel based on ssrc.
int channel = (0 == ssrc) ? gips_channel() : GetChannel(ssrc);
if (channel == -1) {
LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
return false;
}
float scaling;
if (-1 == engine()->gips()->volume()->GIPSVE_GetChannelOutputVolumeScaling(
channel, scaling)) {
LOG_GIPSERR2(GetChannelOutputVolumeScaling, channel, scaling);
return false;
}
float left_pan;
float right_pan;
if (-1 == engine()->gips()->volume()->GIPSVE_GetOutputVolumePan(
channel, left_pan, right_pan)) {
LOG_GIPSERR3(GIPSVE_GetOutputVolumePan, channel, left_pan, right_pan);
// If GetOutputVolumePan fails, we use the default left and right pan.
left_pan = 1.0f;
right_pan = 1.0f;
}
*left = scaling * left_pan;
*right = scaling * right_pan;
return true;
}
bool GipsVoiceMediaChannel::SetRingbackTone(const char *buf, int len) {
ringback_tone_.reset(new GipsSoundclipStream(buf, len));
return true;
}
bool GipsVoiceMediaChannel::PlayRingbackTone(uint32 ssrc,
bool play, bool loop) {
if (!ringback_tone_.get()) {
return false;
}
// Determine which GIPS channel to play on.
int channel = (ssrc == 0) ? gips_channel() : GetChannel(ssrc);
if (channel == -1) {
return false;
}
// Make sure the ringtone is cued properly, and play it out.
if (play) {
ringback_tone_->set_loop(loop);
ringback_tone_->Rewind();
if (engine()->gips()->file()->GIPSVE_StartPlayingFileLocally(channel,
ringback_tone_.get()) == -1) {
LOG_GIPSERR2(StartPlayingFileLocally, channel, ringback_tone_.get());
LOG(LS_ERROR) << "Unable to start ringback tone";
return false;
}
ringback_channels_.insert(channel);
LOG(LS_INFO) << "Started ringback on channel " << channel;
} else {
if (engine()->gips()->file()->GIPSVE_StopPlayingFileLocally(channel)
== -1) {
LOG_GIPSERR1(StopPlayingFileLocally, channel);
return false;
}
LOG(LS_INFO) << "Stopped ringback on channel " << channel;
ringback_channels_.erase(channel);
}
return true;
}
bool GipsVoiceMediaChannel::PressDTMF(int event, bool playout) {
if (!dtmf_allowed_) {
return false;
}
// Enable or disable DTMF playout of this tone as requested. This will linger
// until the next call to this method, but that's OK.
if (engine()->gips()->dtmf()->GIPSVE_SetDTMFFeedbackStatus(playout) == -1) {
LOG_GIPSERR2(SendDTMF, gips_channel(), playout);
return false;
}
// Send DTMF using out-of-band DTMF. ("true", as 3rd arg)
if (engine()->gips()->dtmf()->GIPSVE_SendTelephoneEvent(gips_channel(), event,
true) == -1) {
LOG_GIPSERR3(SendDTMF, gips_channel(), event, true);
return false;
}
return true;
}
void GipsVoiceMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
// Pick which channel to send this packet to. If this packet doesn't match
// any multiplexed streams, just send it to the default channel. Otherwise,
// send it to the specific decoder instance for that stream.
int which_channel = GetChannel(
ParseSsrc(packet->data(), packet->length(), false));
if (which_channel == -1) {
which_channel = gips_channel();
}
// Stop any ringback that might be playing on the channel.
// It's possible the ringback has already stopped, in which case we'll just
// use the opportunity to remove the channel from ringback_channels_.
const std::set<int>::iterator it = ringback_channels_.find(which_channel);
if (it != ringback_channels_.end()) {
if (engine()->gips()->file()->GIPSVE_IsPlayingFileLocally(
which_channel) == 1) {
engine()->gips()->file()->GIPSVE_StopPlayingFileLocally(which_channel);
LOG(LS_INFO) << "Stopped ringback on channel " << which_channel
<< " due to incoming media";
}
ringback_channels_.erase(which_channel);
}
// Pass it off to the decoder.
engine()->gips()->network()->GIPSVE_ReceivedRTPPacket(which_channel,
packet->data(),
packet->length());
}
void GipsVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
// See above.
int which_channel = GetChannel(
ParseSsrc(packet->data(), packet->length(), true));
if (which_channel == -1) {
which_channel = gips_channel();
}
engine()->gips()->network()->GIPSVE_ReceivedRTCPPacket(which_channel,
packet->data(),
packet->length());
}
void GipsVoiceMediaChannel::SetSendSsrc(uint32 ssrc) {
if (engine()->gips()->rtp()->GIPSVE_SetLocalSSRC(gips_channel(), ssrc)
== -1) {
LOG_GIPSERR2(SetSendSSRC, gips_channel(), ssrc);
}
}
bool GipsVoiceMediaChannel::SetRtcpCName(const std::string& cname) {
if (engine()->gips()->rtp()->GIPSVE_SetRTCP_CNAME(gips_channel(),
cname.c_str()) == -1) {
LOG_GIPSERR2(SetRTCP_CNAME, gips_channel(), cname);
return false;
}
return true;
}
bool GipsVoiceMediaChannel::Mute(bool muted) {
if (engine()->gips()->volume()->GIPSVE_SetInputMute(gips_channel(),
muted) == -1) {
LOG_GIPSERR2(SetInputMute, gips_channel(), muted);
return false;
}
return true;
}
bool GipsVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
// In VoiceEngine 3.5, GetRTCPStatistics will return 0 even when it fails,
// causing the stats to contain garbage information. To prevent this, we
// zero the stats structure before calling this API.
// TODO(juberti): Remove this workaround.
GIPS_CallStatistics cs;
unsigned int ssrc;
GIPS_CodecInst codec;
unsigned int level;
// Fill in the sender info, based on what we know, and what the
// remote side told us it got from its RTCP report.
VoiceSenderInfo sinfo;
// Data we obtain locally.
memset(&cs, 0, sizeof(cs));
if (engine()->gips()->rtp()->GIPSVE_GetRTCPStatistics(
gips_channel(), cs) == -1 ||
engine()->gips()->rtp()->GIPSVE_GetLocalSSRC(
gips_channel(), ssrc) == -1) {
return false;
}
sinfo.ssrc = ssrc;
sinfo.bytes_sent = cs.bytesSent;
sinfo.packets_sent = cs.packetsSent;
// RTT isn't known until a RTCP report is received. Until then, GIPS
// returns 0 to indicate an error value.
sinfo.rtt_ms = (cs.rttMs > 0) ? cs.rttMs : -1;
// Data from the last remote RTCP report.
unsigned int ntp_high, ntp_low, timestamp, ptimestamp, jitter;
unsigned short loss; // NOLINT
if (engine()->gips()->rtp()->GIPSVE_GetRemoteRTCPData(gips_channel(),
ntp_high, ntp_low, timestamp, ptimestamp, &jitter, &loss) != -1 &&
engine()->gips()->codec()->GIPSVE_GetSendCodec(gips_channel(),
codec) != -1) {
// Convert Q8 to floating point.
sinfo.fraction_lost = static_cast<float>(loss) / (1 << 8);
// Convert samples to milliseconds.
if (codec.plfreq / 1000 > 0) {
sinfo.jitter_ms = jitter / (codec.plfreq / 1000);
}
} else {
sinfo.fraction_lost = -1;
sinfo.jitter_ms = -1;
}
// TODO(juberti): Figure out how to get remote packets_lost, ext_seqnum
sinfo.packets_lost = -1;
sinfo.ext_seqnum = -1;
// Local speech level.
sinfo.audio_level = (engine()->gips()->volume()->
GIPSVE_GetSpeechInputLevelFullRange(level) != -1) ? level : -1;
info->senders.push_back(sinfo);
// Build the list of receivers, one for each mux channel, or 1 in a 1:1 call.
std::vector<int> channels;
for (ChannelMap::const_iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
channels.push_back(it->second);
}
if (channels.empty()) {
channels.push_back(gips_channel());
}
// Get the SSRC and stats for each receiver, based on our own calculations.
for (std::vector<int>::const_iterator it = channels.begin();
it != channels.end(); ++it) {
memset(&cs, 0, sizeof(cs));
if (engine()->gips()->rtp()->GIPSVE_GetRemoteSSRC(*it, ssrc) != -1 &&
engine()->gips()->rtp()->GIPSVE_GetRTCPStatistics(*it, cs) != -1 &&
engine()->gips()->codec()->GIPSVE_GetRecCodec(*it, codec) != -1) {
VoiceReceiverInfo rinfo;
rinfo.ssrc = ssrc;
rinfo.bytes_rcvd = cs.bytesReceived;
rinfo.packets_rcvd = cs.packetsReceived;
// The next four fields are from the most recently sent RTCP report.
// Convert Q8 to floating point.
rinfo.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
rinfo.packets_lost = cs.cumulativeLost;
rinfo.ext_seqnum = cs.extendedMax;
// Convert samples to milliseconds.
if (codec.plfreq / 1000 > 0) {
rinfo.jitter_ms = cs.jitterSamples / (codec.plfreq / 1000);
}
// Get jitter buffer and total delay (alg + jitter + playout) stats.
GIPS_NetworkStatistics ns;
if (engine()->gips()->neteq() &&
engine()->gips()->neteq()->GIPSVE_GetNetworkStatistics(
*it, ns) != -1) {
rinfo.jitter_buffer_ms = ns.currentBufferSize;
rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
}
if (engine()->gips()->sync()) {
engine()->gips()->sync()->GIPSVE_GetDelayEstimate(*it,
rinfo.delay_estimate_ms);
}
// Get speech level.
rinfo.audio_level = (engine()->gips()->volume()->
GIPSVE_GetSpeechOutputLevelFullRange(*it, level) != -1) ? level : -1;
info->receivers.push_back(rinfo);
}
}
return true;
}
void GipsVoiceMediaChannel::GetLastMediaError(
uint32* ssrc, VoiceMediaChannel::Error* error) {
ASSERT(ssrc != NULL);
ASSERT(error != NULL);
FindSsrc(gips_channel(), ssrc);
*error = GipsErrorToChannelError(GetLastGipsError());
}
bool GipsVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
talk_base::CritScope lock(&mux_channels_cs_);
ASSERT(ssrc != NULL);
if (channel_num == gips_channel()) {
unsigned local_ssrc = 0;
// This is a sending channel.
if (engine()->gips()->rtp()->GIPSVE_GetLocalSSRC(
channel_num, local_ssrc) != -1) {
*ssrc = local_ssrc;
}
return true;
} else if (channel_num == -1 && send_ != SEND_NOTHING) {
// Sometimes the GIPS core will report an error with channel_num = -1. This
// means the error is not limited to a specific channel, so we signal it
// using ssrc = 0. If the current channel is sending, use this channel to
// deliver the message.
*ssrc = 0;
return true;
} else {
// Check whether this is a receiving channel.
for (ChannelMap::const_iterator it = mux_channels_.begin();
it != mux_channels_.end(); ++it) {
if (it->second == channel_num) {
*ssrc = it->first;
return true;
}
}
}
return false;
}
void GipsVoiceMediaChannel::OnError(uint32 ssrc, int error) {
SignalMediaError(ssrc, GipsErrorToChannelError(error));
}
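// Returns the GIPS channel used to receive |ssrc|, or -1 if no such stream
// has been added.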
int GipsVoiceMediaChannel::GetChannel(uint32 ssrc) {
ChannelMap::iterator it = mux_channels_.find(ssrc);
return (it != mux_channels_.end()) ? it->second : -1;
}
int GipsVoiceMediaChannel::GetOutputLevel(int channel) {
unsigned int ulevel;
int ret =
engine()->gips()->volume()->GIPSVE_GetSpeechOutputLevel(channel, ulevel);
return (ret == 0) ? static_cast<int>(ulevel) : -1;
}
bool GipsVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
const std::vector<AudioCodec>& all_codecs, GIPS_CodecInst* send_codec) {
// Get the RED encodings from the parameter with no name. This may
// change based on what is discussed on the Jingle list.
// The encoding parameter is of the form "a/b"; we only support where
// a == b. Verify this and parse out the value into red_pt.
// If the parameter value is absent (as it will be until we wire up the
// signaling of this message), use the second codec specified (i.e. the
// one after "red") as the encoding parameter.
int red_pt = -1;
std::string red_params;
CodecParameterMap::const_iterator it = red_codec.params.find("");
if (it != red_codec.params.end()) {
red_params = it->second;
std::vector<std::string> red_pts;
if (talk_base::split(red_params, '/', &red_pts) != 2 ||
red_pts[0] != red_pts[1] ||
!talk_base::FromString(red_pts[0], &red_pt)) {
LOG(LS_WARNING) << "RED params " << red_params << " not supported.";
return false;
}
} else if (red_codec.params.empty()) {
LOG(LS_WARNING) << "RED params not present, using defaults";
if (all_codecs.size() > 1) {
red_pt = all_codecs[1].id;
}
}
// Try to find red_pt in |all_codecs|.
std::vector<AudioCodec>::const_iterator codec;
for (codec = all_codecs.begin(); codec != all_codecs.end(); ++codec) {
if (codec->id == red_pt)
break;
}
// If we find the right codec, that will be the codec we pass to
// GIPSVE_SetSendCodec, with the desired payload type.
if (codec != all_codecs.end() &&
engine()->FindGIPSCodec(*codec, send_codec)) {
send_codec->pltype = red_pt;
} else {
LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
return false;
}
return true;
}
bool GipsVoiceMediaChannel::EnableRtcp(int channel) {
if (engine()->gips()->rtp()->GIPSVE_SetRTCPStatus(channel, true) == -1) {
LOG_GIPSERR2(SetRTCPStatus, gips_channel(), 1);
return false;
}
// TODO(juberti): Enable VQMon and RTCP XR reports, once we know what
// we want to do with them.
// engine()->gips().GIPSVE_EnableVQMon(gips_channel(), true);
// engine()->gips().GIPSVE_EnableRTCP_XR(gips_channel(), true);
return true;
}
bool GipsVoiceMediaChannel::SetPlayout(int channel, bool playout) {
if (playout) {
LOG(LS_INFO) << "Starting playout for channel " << channel;
if (engine()->gips()->base()->GIPSVE_StartPlayout(channel) == -1) {
LOG_GIPSERR1(StartPlayout, channel);
return false;
}
} else {
LOG(LS_INFO) << "Stopping playout for channel " << channel;
engine()->gips()->base()->GIPSVE_StopPlayout(channel);
}
return true;
}
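// Extracts the SSRC from a packet: for RTP the SSRC field starts at byte
// offset 8, for RTCP (sender SSRC) at offset 4. Returns 0 if the packet is
// too short.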
uint32 GipsVoiceMediaChannel::ParseSsrc(const void* data, size_t len,
bool rtcp) {
size_t ssrc_pos = (!rtcp) ? 8 : 4;
uint32 ssrc = 0;
if (len >= (ssrc_pos + sizeof(ssrc))) {
ssrc = talk_base::GetBE32(static_cast<const char*>(data) + ssrc_pos);
}
return ssrc;
}
// Convert GIPS error code into VoiceMediaChannel::Error enum.
VoiceMediaChannel::Error GipsVoiceMediaChannel::GipsErrorToChannelError(
int err_code) {
switch (err_code) {
case 0:
return ERROR_NONE;
case VE_CANNOT_START_RECORDING:
case VE_MIC_VOL_ERROR:
case VE_GET_MIC_VOL_ERROR:
case VE_CANNOT_ACCESS_MIC_VOL:
return ERROR_REC_DEVICE_OPEN_FAILED;
case VE_SATURATION_WARNING:
return ERROR_REC_DEVICE_SATURATION;
case VE_REC_DEVICE_REMOVED:
return ERROR_REC_DEVICE_REMOVED;
case VE_RUNTIME_REC_WARNING:
case VE_RUNTIME_REC_ERROR:
return ERROR_REC_RUNTIME_ERROR;
case VE_CANNOT_START_PLAYOUT:
case VE_SPEAKER_VOL_ERROR:
case VE_GET_SPEAKER_VOL_ERROR:
case VE_CANNOT_ACCESS_SPEAKER_VOL:
return ERROR_PLAY_DEVICE_OPEN_FAILED;
case VE_RUNTIME_PLAY_WARNING:
case VE_RUNTIME_PLAY_ERROR:
return ERROR_PLAY_RUNTIME_ERROR;
case VE_TYPING_NOISE_WARNING:
return ERROR_REC_TYPING_NOISE_DETECTED;
default:
return VoiceMediaChannel::ERROR_OTHER;
}
}
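// GipsSoundclipStream wraps an in-memory buffer so it can be handed to the
// GIPS file-playing APIs (GIPSVE_StartPlayingFileLocally, etc.). Read()
// returns the number of bytes copied; Rewind() returns 0 when the clip
// should loop and -1 to keep GIPS from looping.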
int GipsSoundclipStream::Read(void *buf, int len) {
size_t res = 0;
mem_.Read(buf, len, &res, NULL);
return res;
}
int GipsSoundclipStream::Rewind() {
mem_.Rewind();
// Return -1 to keep GIPS from looping.
return (loop_) ? 0 : -1;
}
} // namespace cricket
#endif // HAVE_GIPS
// shhhhh}