Add gips mediaengine in libjingle code.
- Add gips media engine wrapper and update mediaengine.cc.
Based on instruction from
https://sites.google.com/a/google.com/wavelet/Home/Magic-Flute--RTC-Engine-/how-to-make-a-magic-flute-release#p10-client
The current CL is googletalkplugin_release_branch@25679112-p10
Change-Id: Ibe50141e5aa398f059ac6249cac5227ffe439e0c
diff --git a/talk/libjingle.scons b/talk/libjingle.scons
index 5165b48..6713149 100644
--- a/talk/libjingle.scons
+++ b/talk/libjingle.scons
@@ -118,7 +118,7 @@
"EXPAT_RELATIVE_PATH",
"SRTP_RELATIVE_PATH",
"XML_STATIC",
- "HAS_GIPS",
+ "HAVE_GIPS",
],
srcs = [
"base/asyncfile.cc",
@@ -212,6 +212,7 @@
"session/phone/currentspeakermonitor.cc",
"session/phone/devicemanager.cc",
"session/phone/filemediaengine.cc",
+ "session/phone/gipsmediaengine.cc",
"session/phone/mediaengine.cc",
"session/phone/mediamessages.cc",
"session/phone/mediamonitor.cc",
diff --git a/talk/session/phone/gips.h b/talk/session/phone/gips.h
new file mode 100644
index 0000000..14589fd
--- /dev/null
+++ b/talk/session/phone/gips.h
@@ -0,0 +1,161 @@
+// Copyright 2007 Google Inc. All Rights Reserved
+// Author: juberti@google.com (Justin Uberti)
+
+#ifndef TALK_SESSION_PHONE_GIPS_H_
+#define TALK_SESSION_PHONE_GIPS_H_
+
+#include "talk/base/common.h"
+#include "talk/third_party/gips/Interface/GIPSVEBase.h"
+#include "talk/third_party/gips/Interface/GIPSVECodec.h"
+#include "talk/third_party/gips/Interface/GIPSVEDTMF.h"
+#include "talk/third_party/gips/Interface/GIPSVEErrors.h"
+#include "talk/third_party/gips/Interface/GIPSVEExternalMedia.h"
+#include "talk/third_party/gips/Interface/GIPSVEFile.h"
+#include "talk/third_party/gips/Interface/GIPSVEHardware.h"
+#include "talk/third_party/gips/Interface/GIPSVENetwork.h"
+#include "talk/third_party/gips/Interface/GIPSVENetEqStats.h"
+#include "talk/third_party/gips/Interface/GIPSVERTP_RTCP.h"
+#include "talk/third_party/gips/Interface/GIPSVEVideoSync.h"
+#include "talk/third_party/gips/Interface/GIPSVEVolumeControl.h"
+#include "talk/third_party/gips/Interface/GIPSVEVQE.h"
+
+// Tracing helpers, for easy logging when GIPS calls fail.
+// Example: "LOG_GIPSERR1(StartSend, channel);" produces the trace
+// "GIPSVE_StartSend(1) failed, err=XXXX"
+// The method GetLastGipsError must be defined in the calling scope.
+#define LOG_GIPSERR0(func) \
+ LOG_GIPSERR0_EX(func, GetLastGipsError())
+#define LOG_GIPSERR1(func, a1) \
+ LOG_GIPSERR1_EX(func, a1, GetLastGipsError())
+#define LOG_GIPSERR2(func, a1, a2) \
+ LOG_GIPSERR2_EX(func, a1, a2, GetLastGipsError())
+#define LOG_GIPSERR3(func, a1, a2, a3) \
+ LOG_GIPSERR3_EX(func, a1, a2, a3, GetLastGipsError())
+#define LOG_GIPSERR0_EX(func, err) LOG(LS_WARNING) \
+ << "GIPSVE_" << #func << "() failed, err=" << err
+#define LOG_GIPSERR1_EX(func, a1, err) LOG(LS_WARNING) \
+ << "GIPSVE_" << #func << "(" << a1 << ") failed, err=" << err
+#define LOG_GIPSERR2_EX(func, a1, a2, err) LOG(LS_WARNING) \
+ << "GIPSVE_" << #func << "(" << a1 << ", " << a2 << ") failed, err=" \
+ << err
+#define LOG_GIPSERR3_EX(func, a1, a2, a3, err) LOG(LS_WARNING) \
+ << "GIPSVE_" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+ << ") failed, err=" << err
+
+// automatically handles lifetime of GIPS VoiceEngine
+class scoped_gips_engine {
+ public:
+ explicit scoped_gips_engine(GIPSVoiceEngine* e) : ptr(e) {}
+ // VERIFY, to ensure that there are no leaks at shutdown
+ ~scoped_gips_engine() { if (ptr) VERIFY(GIPSVoiceEngine::Delete(ptr)); }
+ GIPSVoiceEngine* get() const { return ptr; }
+ private:
+ GIPSVoiceEngine* ptr;
+};
+
+// scoped_ptr class to handle obtaining and releasing GIPS interface pointers
+template<class T> class scoped_gips_ptr {
+ public:
+ explicit scoped_gips_ptr(const scoped_gips_engine& e)
+ : ptr(T::GIPSVE_GetInterface(e.get())) {}
+ explicit scoped_gips_ptr(T* p) : ptr(p) {}
+ ~scoped_gips_ptr() { if (ptr) ptr->GIPSVE_Release(); }
+ T* operator->() const { return ptr; }
+ T* get() const { return ptr; }
+ private:
+ T* ptr;
+};
+
+// allows one to do gips_cast<GIPSVEXXXX>(engine)->foo()
+#define gips_cast scoped_gips_ptr
+
+// Utility class for aggregating the various GIPS interfaces.
+// Fake implementations can also be injected for testing.
+class GipsWrapper {
+ public:
+ GipsWrapper()
+ : engine_(GIPSVoiceEngine::Create()),
+ base_(engine_),
+ codec_(engine_),
+ dtmf_(engine_),
+ file_(engine_),
+ hw_(engine_),
+ media_(engine_),
+ neteq_(engine_),
+ network_(engine_),
+ rtp_(engine_),
+ sync_(engine_),
+ volume_(engine_),
+ vqe_(engine_) {
+ }
+ GipsWrapper(GIPSVEBase* base, GIPSVECodec* codec, GIPSVEDTMF* dtmf,
+ GIPSVEFile* file, GIPSVEHardware* hw,
+ GIPSVEExternalMedia* media, GIPSVENetEqStats* neteq,
+ GIPSVENetwork* network, GIPSVERTP_RTCP* rtp,
+ GIPSVEVideoSync* sync, GIPSVEVolumeControl* volume,
+ GIPSVEVQE* vqe)
+ : engine_(NULL),
+ base_(base),
+ codec_(codec),
+ dtmf_(dtmf),
+ file_(file),
+ hw_(hw),
+ media_(media),
+ neteq_(neteq),
+ network_(network),
+ rtp_(rtp),
+ sync_(sync),
+ volume_(volume),
+ vqe_(vqe) {
+ }
+ virtual ~GipsWrapper() {}
+ GIPSVEBase* base() { return base_.get(); }
+ GIPSVECodec* codec() { return codec_.get(); }
+ GIPSVEDTMF* dtmf() { return dtmf_.get(); }
+ GIPSVEFile* file() { return file_.get(); }
+ GIPSVEHardware* hw() { return hw_.get(); }
+ GIPSVEExternalMedia* media() { return media_.get(); }
+ GIPSVENetEqStats* neteq() { return neteq_.get(); }
+ GIPSVENetwork* network() { return network_.get(); }
+ GIPSVERTP_RTCP* rtp() { return rtp_.get(); }
+ GIPSVEVideoSync* sync() { return sync_.get(); }
+ GIPSVEVolumeControl* volume() { return volume_.get(); }
+ GIPSVEVQE* vqe() { return vqe_.get(); }
+ int error() { return base_->GIPSVE_LastError(); }
+
+ private:
+ scoped_gips_engine engine_;
+ scoped_gips_ptr<GIPSVEBase> base_;
+ scoped_gips_ptr<GIPSVECodec> codec_;
+ scoped_gips_ptr<GIPSVEDTMF> dtmf_;
+ scoped_gips_ptr<GIPSVEFile> file_;
+ scoped_gips_ptr<GIPSVEHardware> hw_;
+ scoped_gips_ptr<GIPSVEExternalMedia> media_;
+ scoped_gips_ptr<GIPSVENetEqStats> neteq_;
+ scoped_gips_ptr<GIPSVENetwork> network_;
+ scoped_gips_ptr<GIPSVERTP_RTCP> rtp_;
+ scoped_gips_ptr<GIPSVEVideoSync> sync_;
+ scoped_gips_ptr<GIPSVEVolumeControl> volume_;
+ scoped_gips_ptr<GIPSVEVQE> vqe_;
+};
+
+// Adds indirection to static GIPS functions, allowing them to be mocked.
+class GipsTraceWrapper {
+ public:
+ virtual ~GipsTraceWrapper() {}
+
+ virtual int SetTraceFilter(const unsigned int filter) {
+ return GIPSVoiceEngine::SetTraceFilter(filter);
+ }
+ virtual int SetTraceFile(const char* fileNameUTF8) {
+ return GIPSVoiceEngine::SetTraceFile(fileNameUTF8);
+ }
+ virtual int SetEncryptedTraceFile(const char* fileNameUTF8) {
+ return GIPSVoiceEngine::SetEncryptedTraceFile(fileNameUTF8);
+ }
+ virtual int SetTraceCallback(GIPSTraceCallback* callback) {
+ return GIPSVoiceEngine::SetTraceCallback(callback);
+ }
+};
+
+#endif // TALK_SESSION_PHONE_GIPS_H_
diff --git a/talk/session/phone/gipsmediaengine.cc b/talk/session/phone/gipsmediaengine.cc
new file mode 100644
index 0000000..4f8efa2
--- /dev/null
+++ b/talk/session/phone/gipsmediaengine.cc
@@ -0,0 +1,2030 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// shhhhh{
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+// shhhhh}
+
+#ifdef HAVE_GIPS
+
+#include "talk/session/phone/gipsmediaengine.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <string>
+#include <vector>
+
+#include "talk/base/base64.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/common.h"
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+#include "talk/base/stringutils.h"
+
+#ifdef WIN32
+#include <objbase.h> // NOLINT
+#endif
+
+namespace cricket {
+
+// For Linux/Mac, using the default device is done by specifying index 0 for
+// VoE 4.0 and not -1 (which was the case for VoE 3.5).
+//
+// On Windows Vista and newer, Microsoft introduced the concept of "Default
+// Communications Device". This means that there are two types of default
+// devices (old Wave Audio style default and Default Communications Device).
+//
+// On Windows systems which only support Wave Audio style default, uses either
+// -1 or 0 to select the default device.
+//
+// On Windows systems which support both "Default Communication Device" and
+// old Wave Audio style default, use -1 for Default Communications Device and
+// -2 for Wave Audio style default, which is what we want to use for clips.
+// It's not clear yet whether the -2 index is handled properly on other OSes.
+
+#ifdef WIN32
+static const int kDefaultAudioDeviceId = -1;
+static const int kDefaultSoundclipDeviceId = -2;
+#else
+static const int kDefaultAudioDeviceId = 0;
+#endif
+
+// extension header for audio levels, as defined in
+// http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-03
+static const char kRtpAudioLevelHeaderExtension[] =
+ "urn:ietf:params:rtp-hdrext:ssrc-audio-level";
+
+static void GipsLogMultiline(talk_base::LoggingSeverity sev, char* text) {
+ const char* delim = "\r\n";
+ for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
+ LOG_V(sev) << tok;
+ }
+}
+
+// GipsVoiceEngine
+const GipsVoiceEngine::CodecPref GipsVoiceEngine::kCodecPrefs[] = {
+ { "ISAC", 16000 },
+ { "ISAC", 32000 },
+ { "ISACLC", 16000 },
+ { "speex", 16000 },
+ { "IPCMWB", 16000 },
+ { "G722", 16000 },
+ { "iLBC", 8000 },
+ { "speex", 8000 },
+ { "GSM", 8000 },
+ { "EG711U", 8000 },
+ { "EG711A", 8000 },
+ { "PCMU", 8000 },
+ { "PCMA", 8000 },
+ { "CN", 32000 },
+ { "CN", 16000 },
+ { "CN", 8000 },
+ { "red", 8000 },
+ { "telephone-event", 8000 },
+};
+
+class GipsSoundclipMedia : public SoundclipMedia {
+ public:
+ explicit GipsSoundclipMedia(GipsVoiceEngine *engine)
+ : engine_(engine), gips_channel_(-1) {
+ engine_->RegisterSoundclip(this);
+ }
+
+ virtual ~GipsSoundclipMedia() {
+ engine_->UnregisterSoundclip(this);
+ if (gips_channel_ != -1) {
+ if (engine_->gips_sc()->base()->GIPSVE_DeleteChannel(gips_channel_)
+ == -1) {
+ LOG_GIPSERR1(DeleteChannel, gips_channel_);
+ }
+ }
+ }
+
+ bool Init() {
+ gips_channel_ = engine_->gips_sc()->base()->GIPSVE_CreateChannel();
+ if (gips_channel_ == -1) {
+ LOG_GIPSERR0(CreateChannel);
+ return false;
+ }
+ return true;
+ }
+
+ bool Enable() {
+ if (engine_->gips_sc()->base()->GIPSVE_StartPlayout(gips_channel_) == -1) {
+ LOG_GIPSERR1(StartPlayout, gips_channel_);
+ return false;
+ }
+ return true;
+ }
+
+ bool Disable() {
+ if (engine_->gips_sc()->base()->GIPSVE_StopPlayout(gips_channel_) == -1) {
+ LOG_GIPSERR1(StopPlayout, gips_channel_);
+ return false;
+ }
+ return true;
+ }
+
+ virtual bool PlaySound(const char *buf, int len, int flags) {
+ // Must stop playing the current sound (if any), because we are about to
+ // modify the stream.
+ if (engine_->gips_sc()->file()->GIPSVE_StopPlayingFileLocally(gips_channel_)
+ == -1) {
+ LOG_GIPSERR1(StopPlayingFileLocally, gips_channel_);
+ return false;
+ }
+
+ if (buf) {
+ stream_.reset(new GipsSoundclipStream(buf, len));
+ stream_->set_loop((flags & SF_LOOP) != 0);
+ stream_->Rewind();
+
+ // Play it.
+ if (engine_->gips_sc()->file()->GIPSVE_StartPlayingFileLocally(
+ gips_channel_, stream_.get()) == -1) {
+ LOG_GIPSERR2(StartPlayingFileLocally, gips_channel_, stream_.get());
+ LOG(LS_ERROR) << "Unable to start soundclip";
+ return false;
+ }
+ } else {
+ stream_.reset();
+ }
+ return true;
+ }
+
+ int GetLastGipsError() { return engine_->gips_sc()->error(); }
+
+ private:
+ GipsVoiceEngine *engine_;
+ int gips_channel_;
+ talk_base::scoped_ptr<GipsSoundclipStream> stream_;
+};
+
+GipsVoiceEngine::GipsVoiceEngine()
+ : gips_(new GipsWrapper()),
+ gips_sc_(new GipsWrapper()),
+ tracing_(new GipsTraceWrapper()),
+ log_level_(kDefaultLogSeverity),
+ is_dumping_aec_(false),
+ desired_local_monitor_enable_(false) {
+ Construct();
+}
+
+GipsVoiceEngine::GipsVoiceEngine(
+ GipsWrapper* gips, GipsWrapper* gips_sc, GipsTraceWrapper* tracing)
+ : gips_(gips),
+ gips_sc_(gips_sc),
+ tracing_(tracing),
+ log_level_(kDefaultLogSeverity),
+ is_dumping_aec_(false),
+ desired_local_monitor_enable_(false) {
+ Construct();
+}
+
+void GipsVoiceEngine::Construct() {
+ initialized_ = false;
+ LOG(LS_VERBOSE) << "GipsVoiceEngine::GipsVoiceEngine";
+ ApplyLogging("");
+ if (tracing_->SetTraceCallback(this) == -1) {
+ LOG_GIPSERR0(SetTraceCallback);
+ }
+ if (gips_->base()->GIPSVE_RegisterVoiceEngineObserver(*this) == -1) {
+ LOG_GIPSERR0(RegisterVoiceEngineObserver);
+ }
+ // Clear the default agc state.
+ memset(&default_agc_config_, 0, sizeof(default_agc_config_));
+
+ // Load our audio codec list
+ LOG(LS_INFO) << "GIPS VoiceEngine codecs:";
+ int ncodecs = gips_->codec()->GIPSVE_NumOfCodecs();
+ for (int i = 0; i < ncodecs; ++i) {
+ GIPS_CodecInst gcodec;
+ if (gips_->codec()->GIPSVE_GetCodec(i, gcodec) >= 0) {
+ int pref = GetCodecPreference(gcodec.plname, gcodec.plfreq);
+ if (pref != -1) {
+ if (gcodec.rate == -1) gcodec.rate = 0;
+ AudioCodec codec(gcodec.pltype, gcodec.plname, gcodec.plfreq,
+ gcodec.rate, gcodec.channels, pref);
+ LOG(LS_INFO) << gcodec.plname << "/" << gcodec.plfreq << "/"
+ << gcodec.channels << " " << gcodec.pltype;
+ codecs_.push_back(codec);
+ }
+ }
+ }
+ // Make sure they are in local preference order
+ std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
+}
+
+GipsVoiceEngine::~GipsVoiceEngine() {
+ LOG(LS_VERBOSE) << "GipsVoiceEngine::~GipsVoiceEngine";
+ if (gips_->base()->GIPSVE_DeRegisterVoiceEngineObserver() == -1) {
+ LOG_GIPSERR0(DeRegisterVoiceEngineObserver);
+ }
+ tracing_->SetTraceCallback(NULL);
+}
+
+bool GipsVoiceEngine::Init() {
+ LOG(LS_INFO) << "GipsVoiceEngine::Init";
+ bool res = InitInternal();
+ if (res) {
+ LOG(LS_INFO) << "GipsVoiceEngine::Init Done!";
+ } else {
+ LOG(LS_ERROR) << "GipsVoiceEngine::Init failed";
+ Terminate();
+ }
+ return res;
+}
+
+bool GipsVoiceEngine::InitInternal() {
+ // Temporarily turn logging level up for the GIPSVE_Init call
+ int old_level = log_level_;
+ log_level_ = talk_base::_min(log_level_,
+ static_cast<int>(talk_base::LS_INFO));
+ ApplyLogging("");
+
+#if defined(LINUX) && !defined(HAVE_LIBPULSE)
+ gips_->hw()->GIPSVE_SetAudioDeviceLayer(GIPS_AUDIO_LINUX_ALSA);
+#endif
+#ifdef WIN32
+ // NOTE(fbarchard): Enable this to test WAVE on Windows 7
+ // gips_->hw()->GIPSVE_SetAudioDeviceLayer(GIPS_AUDIO_WINDOWS_WAVE);
+#endif
+// shhhhh{
+#ifdef ANDROID
+ const char AUTH_KEY[] =
+ "====YUtFWRAAAAADBtIHgAAAAAEAAAAcAAAAAQBHU0dsb2J"
+ "hbCBJUCBTb3VuZAACAAAADgAAAGdvb2dsZQAAAABGAAAAEkxO38dEtVtksyvvIn"
+ "eiDc8yJtfB038G6bZQ4zNByUrAnpd9znXNRIe9k4Tjrjn0q7ztOMs2Ge6pqMEYq"
+ "aAEANWCYjZ1kKXkBNJqfZq6TGn894PUJXYKirNVozxjT1E+GzB9xCEDVuTowZIJ"
+ "T7qFBDr2TSCBhg2rA5JTT7Y/+l01DHxOzvOtdvdQucVyp3QbNZlwdFLJEuevKre"
+ "ZnfH+ChYvHXrPDWyJPNvnl9382VFbk/5ZVbdoCaHi4ISDWn9C=6wIv";
+
+ if (gips_->base()->GIPSVE_Authenticate(AUTH_KEY, strlen(AUTH_KEY)) != 0) {
+ LOG_GIPSERR0(Authenticate);
+ return false;
+ }
+#endif
+// shhhhh}
+
+ // Init GIPS VoiceEngine, enabling AEC logging if specified in SetLogging.
+ if (gips_->base()->GIPSVE_Init() == -1) {
+ LOG_GIPSERR0_EX(Init, gips_->error());
+ return false;
+ }
+
+ // Restore the previous log level and apply the log filter.
+ log_level_ = old_level;
+ ApplyLogging(log_filter_);
+
+ // Log the GIPS version info
+ char buffer[1024] = "";
+ gips_->base()->GIPSVE_GetVersion(buffer);
+ LOG(LS_INFO) << "GIPS VoiceEngine Version:";
+ GipsLogMultiline(talk_base::LS_INFO, buffer);
+
+ // Turn on AEC, AGC, NS and typing detection by default.
+ int options = MediaEngineInterface::ECHO_CANCELLATION
+ | MediaEngineInterface::AUTO_GAIN_CONTROL
+ | MediaEngineInterface::NOISE_SUPPRESSION
+#if !defined(IOS) && !defined(ANDROID)
+ | MediaEngineInterface::TYPING_DETECTION
+#endif
+ ;
+ if (!SetOptions(options)) {
+ return false;
+ }
+
+ // Save the default AGC configuration settings.
+ if (gips_->vqe()->GIPSVE_GetAGCConfig(default_agc_config_) == -1) {
+ LOG_GIPSERR0(GetAGCConfig);
+ return false;
+ }
+
+ // Print our codec list again for the call diagnostic log
+ LOG(LS_INFO) << "GIPS VoiceEngine codecs:";
+ for (std::vector<AudioCodec>::const_iterator it = codecs_.begin();
+ it != codecs_.end(); ++it) {
+ LOG(LS_INFO) << it->name << "/" << it->clockrate << "/"
+ << it->channels << " " << it->id;
+ }
+
+#if defined(LINUX) && !defined(HAVE_LIBPULSE)
+ gips_sc_->hw()->GIPSVE_SetAudioDeviceLayer(GIPS_AUDIO_LINUX_ALSA);
+#endif
+
+// shhhhh{
+#ifdef ANDROID
+ if (gips_sc_->base()->GIPSVE_Authenticate(AUTH_KEY, strlen(AUTH_KEY)) != 0) {
+ LOG_GIPSERR0(Authenticate);
+ return false;
+ }
+#endif
+// shhhhh}
+
+ // Initialize the GIPS instance that we'll use to play out sound clips.
+ if (gips_sc_->base()->GIPSVE_Init() == -1) {
+ LOG_GIPSERR0_EX(Init, gips_sc_->error());
+ return false;
+ }
+
+ // On Windows, tell it to use the default sound (not communication) devices.
+ // First check whether there is a valid sound device for playback.
+ // TODO(juberti): Clean this up when we support setting the soundclip device.
+#ifdef WIN32
+ int num_of_devices = 0;
+ if (gips_sc_->hw()->GIPSVE_GetNumOfPlayoutDevices(num_of_devices) != -1 &&
+ num_of_devices > 0) {
+ if (gips_sc_->hw()->GIPSVE_SetPlayoutDevice(kDefaultSoundclipDeviceId)
+ == -1) {
+ LOG_GIPSERR1_EX(SetPlayoutDevice, kDefaultSoundclipDeviceId,
+ gips_sc_->error());
+ return false;
+ }
+ } else {
+ LOG(LS_WARNING) << "No valid sound playout device found.";
+ }
+#endif
+
+ initialized_ = true;
+ return true;
+}
+
+void GipsVoiceEngine::Terminate() {
+ LOG(LS_INFO) << "GipsVoiceEngine::Terminate";
+ initialized_ = false;
+
+ if (is_dumping_aec_) {
+ if (gips_->vqe()->GIPSVE_StopDebugRecording() == -1) {
+ LOG_GIPSERR0(StopDebugRecording);
+ }
+ is_dumping_aec_ = false;
+ }
+
+ gips_sc_->base()->GIPSVE_Terminate();
+ gips_->base()->GIPSVE_Terminate();
+ desired_local_monitor_enable_ = false;
+}
+
+int GipsVoiceEngine::GetCapabilities() {
+ return AUDIO_SEND | AUDIO_RECV;
+}
+
+VoiceMediaChannel *GipsVoiceEngine::CreateChannel() {
+ GipsVoiceMediaChannel* ch = new GipsVoiceMediaChannel(this);
+ if (!ch->valid()) {
+ delete ch;
+ ch = NULL;
+ }
+ return ch;
+}
+
+SoundclipMedia *GipsVoiceEngine::CreateSoundclip() {
+ GipsSoundclipMedia *soundclip = new GipsSoundclipMedia(this);
+ if (!soundclip->Init() || !soundclip->Enable()) {
+ delete soundclip;
+ return NULL;
+ }
+ return soundclip;
+}
+
+bool GipsVoiceEngine::SetOptions(int options) {
+ // GIPS tells us that "auto" mode doesn't work too well, so we don't use it.
+ bool aec = (options & MediaEngineInterface::ECHO_CANCELLATION) ? true : false;
+ bool agc = (options & MediaEngineInterface::AUTO_GAIN_CONTROL) ? true : false;
+ bool ns = (options & MediaEngineInterface::NOISE_SUPPRESSION) ? true : false;
+
+#if !defined(IOS) && !defined(ANDROID)
+ bool typing_detection =
+ (options & MediaEngineInterface::TYPING_DETECTION) ? true : false;
+
+ if (gips_->vqe()->GIPSVE_SetECStatus(aec) == -1) {
+ LOG_GIPSERR1(SetECStatus, aec);
+ return false;
+ }
+
+ if (gips_->vqe()->GIPSVE_SetAGCStatus(agc) == -1) {
+ LOG_GIPSERR1(SetAGCStatus, agc);
+ return false;
+ }
+
+ // TODO(pthatcher): The existing Talk Plugin's on-disk preference settings
+ // won't include NOISE_SUPPRESSION or TYPING_DETECTION, so those flags won't
+ // get passed in. For desktop, we hard-code NS and typing detection to
+ // true for now, until we can solve this.
+ if (gips_->vqe()->GIPSVE_SetNSStatus(true) == -1) {
+ LOG_GIPSERR1(SetNSStatus, ns);
+ return false;
+ }
+
+ if (gips_->vqe()->GIPSVE_SetTypingDetectionStatus(true) == -1) {
+ // In case of error, log the info and continue
+ LOG_GIPSERR1(SetTypingDetectionStatus, typing_detection);
+ }
+#else
+ if (gips_->vqe()->GIPSVE_SetECStatus(aec, GIPS_EC_AECM) == -1) {
+ LOG_GIPSERR2(SetECStatus, aec, GIPS_EC_AECM);
+ return false;
+ }
+
+ if (aec) {
+ // Use speakerphone mode with comfort noise generation for mobile.
+ if (gips_->vqe()->GIPSVE_SetAECMMode(GIPS_AECM_SPEAKERPHONE, true) != 0) {
+ LOG_GIPSERR2(SetAECMMode, GIPS_AECM_SPEAKERPHONE, true);
+ }
+ }
+
+ // On mobile, GIPS recommends fixed AGC (not adaptive)
+ if (gips_->vqe()->GIPSVE_SetAGCStatus(agc, GIPS_AGC_FIXED_DIGITAL) == -1) {
+ LOG_GIPSERR2(SetAGCStatus, agc, GIPS_AGC_FIXED_DIGITAL);
+ return false;
+ }
+
+ // On mobile, GIPS recommends moderate aggressiveness.
+ if (gips_->vqe()->GIPSVE_SetNSStatus(ns,
+ GIPS_NS_MODERATE_SUPPRESSION) == -1) {
+ LOG_GIPSERR2(SetNSStatus, ns, GIPS_NS_MODERATE_SUPPRESSION);
+ return false;
+ }
+
+ // No typing detection support on iOS or Android.
+#endif // !IOS && !ANDROID
+
+ return true;
+}
+
+struct ResumeEntry {
+ ResumeEntry(GipsVoiceMediaChannel *c, bool p, SendFlags s)
+ : channel(c),
+ playout(p),
+ send(s) {
+ }
+
+ GipsVoiceMediaChannel *channel;
+ bool playout;
+ SendFlags send;
+};
+
+// TODO(juberti): Refactor this so that the core logic can be used to set the
+// soundclip device. At that time, reinstate the soundclip pause/resume code.
+bool GipsVoiceEngine::SetDevices(const Device* in_device,
+ const Device* out_device) {
+#if !defined(IOS) && !defined(ANDROID)
+ int in_id = in_device ? talk_base::FromString<int>(in_device->id) :
+ kDefaultAudioDeviceId;
+ int out_id = out_device ? talk_base::FromString<int>(out_device->id) :
+ kDefaultAudioDeviceId;
+ // The device manager uses -1 as the default device, which was the case for
+ // VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
+#ifndef WIN32
+ if (-1 == in_id) {
+ in_id = kDefaultAudioDeviceId;
+ }
+ if (-1 == out_id) {
+ out_id = kDefaultAudioDeviceId;
+ }
+#endif
+
+ std::string in_name = (in_id != kDefaultAudioDeviceId) ?
+ in_device->name : "Default device";
+ std::string out_name = (out_id != kDefaultAudioDeviceId) ?
+ out_device->name : "Default device";
+ LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
+ << ") and speaker to (id=" << out_id << ", name=" << out_name
+ << ")";
+
+ // If we're running the local monitor, we need to stop it first.
+ bool ret = true;
+ if (!PauseLocalMonitor()) {
+ LOG(LS_WARNING) << "Failed to pause local monitor";
+ ret = false;
+ }
+
+ // Must also pause all audio playback and capture.
+ for (ChannelList::const_iterator i = channels_.begin();
+ i != channels_.end(); ++i) {
+ GipsVoiceMediaChannel *channel = *i;
+ if (!channel->PausePlayout()) {
+ LOG(LS_WARNING) << "Failed to pause playout";
+ ret = false;
+ }
+ if (!channel->PauseSend()) {
+ LOG(LS_WARNING) << "Failed to pause send";
+ ret = false;
+ }
+ }
+
+ // Find the recording device id in GIPS and set recording device.
+ if (!FindGipsAudioDeviceId(true, in_name, in_id, &in_id)) {
+ ret = false;
+ }
+ if (ret) {
+ if (gips_->hw()->GIPSVE_SetRecordingDevice(in_id) == -1) {
+ LOG_GIPSERR2(SetRecordingDevice, in_device->name, in_id);
+ ret = false;
+ }
+ }
+
+ // Find the playout device id in GIPS and set playout device.
+ if (!FindGipsAudioDeviceId(false, out_name, out_id, &out_id)) {
+ LOG(LS_WARNING) << "Failed to find gips device id for " << out_name;
+ ret = false;
+ }
+ if (ret) {
+ if (gips_->hw()->GIPSVE_SetPlayoutDevice(out_id) == -1) {
+ LOG_GIPSERR2(SetPlayoutDevice, out_device->name, out_id);
+ ret = false;
+ }
+ }
+
+ // Resume all audio playback and capture.
+ for (ChannelList::const_iterator i = channels_.begin();
+ i != channels_.end(); ++i) {
+ GipsVoiceMediaChannel *channel = *i;
+ if (!channel->ResumePlayout()) {
+ LOG(LS_WARNING) << "Failed to resume playout";
+ ret = false;
+ }
+ if (!channel->ResumeSend()) {
+ LOG(LS_WARNING) << "Failed to resume send";
+ ret = false;
+ }
+ }
+
+ // Resume local monitor.
+ if (!ResumeLocalMonitor()) {
+ LOG(LS_WARNING) << "Failed to resume local monitor";
+ ret = false;
+ }
+
+ if (ret) {
+ LOG(LS_INFO) << "Set microphone to (id=" << in_id <<" name=" << in_name
+ << ") and speaker to (id="<< out_id << " name=" << out_name
+ << ")";
+ }
+
+ return ret;
+#else
+ return true;
+#endif // !IOS && !ANDROID
+}
+
+bool GipsVoiceEngine::FindGipsAudioDeviceId(
+ bool is_input, const std::string& dev_name, int dev_id, int* gips_id) {
+ // In Linux, GIPS uses the same device dev_id as the device manager.
+#ifdef LINUX
+ *gips_id = dev_id;
+ return true;
+#else
+ // In Windows and Mac, we need to find the GIPS device id by name unless the
+ // input dev_id is the default device id.
+ if (kDefaultAudioDeviceId == dev_id) {
+ *gips_id = dev_id;
+ return true;
+ }
+
+ // Get the number of GIPS audio devices.
+ int count = 0;
+ if (is_input) {
+ if (-1 == gips_->hw()->GIPSVE_GetNumOfRecordingDevices(count)) {
+ LOG_GIPSERR0(GetNumOfRecordingDevices);
+ return false;
+ }
+ } else {
+ if (-1 == gips_->hw()->GIPSVE_GetNumOfPlayoutDevices(count)) {
+ LOG_GIPSERR0(GetNumOfPlayoutDevices);
+ return false;
+ }
+ }
+
+ for (int i = 0; i < count; ++i) {
+ char name[128];
+ char guid[128];
+ if (is_input) {
+ gips_->hw()->GIPSVE_GetRecordingDeviceName(i, name, guid);
+ LOG(LS_VERBOSE) << "GIPS microphone " << i << ": " << name;
+ } else {
+ gips_->hw()->GIPSVE_GetPlayoutDeviceName(i, name, guid);
+ LOG(LS_VERBOSE) << "GIPS speaker " << i << ": " << name;
+ }
+
+ std::string gips_name(name);
+ if (dev_name.compare(0, gips_name.size(), gips_name) == 0) {
+ *gips_id = i;
+ return true;
+ }
+ }
+ LOG(LS_WARNING) << "GIPS cannot find device: " << dev_name;
+ return false;
+#endif
+}
+
+bool GipsVoiceEngine::GetOutputVolume(int* level) {
+ unsigned int ulevel;
+ if (gips_->volume()->GIPSVE_GetSpeakerVolume(ulevel) == -1) {
+ LOG_GIPSERR1(GetSpeakerVolume, level);
+ return false;
+ }
+ *level = ulevel;
+ return true;
+}
+
+bool GipsVoiceEngine::SetOutputVolume(int level) {
+ ASSERT(level >= 0 && level <= 255);
+ if (gips_->volume()->GIPSVE_SetSpeakerVolume(level) == -1) {
+ LOG_GIPSERR1(SetSpeakerVolume, level);
+ return false;
+ }
+ return true;
+}
+
+int GipsVoiceEngine::GetInputLevel() {
+ unsigned int ulevel;
+ return (gips_->volume()->GIPSVE_GetSpeechInputLevel(ulevel) != -1) ?
+ static_cast<int>(ulevel) : -1;
+}
+
+bool GipsVoiceEngine::SetLocalMonitor(bool enable) {
+ desired_local_monitor_enable_ = enable;
+ return ChangeLocalMonitor(desired_local_monitor_enable_);
+}
+
+bool GipsVoiceEngine::ChangeLocalMonitor(bool enable) {
+ if (enable && !monitor_.get()) {
+ monitor_.reset(new GipsMonitorStream);
+ if (gips_->file()->GIPSVE_StartRecordingMicrophone(monitor_.get()) == -1) {
+ LOG_GIPSERR1(StartRecordingMicrophone, monitor_.get());
+ // Must call Stop() because there are some cases where Start will report
+ // failure but still change the state, and if we leave VE in the on state
+ // then it could crash later when trying to invoke methods on our monitor.
+ gips_->file()->GIPSVE_StopRecordingMicrophone();
+ monitor_.reset();
+ return false;
+ }
+ } else if (!enable && monitor_.get()) {
+ gips_->file()->GIPSVE_StopRecordingMicrophone();
+ monitor_.reset();
+ }
+ return true;
+}
+
+bool GipsVoiceEngine::PauseLocalMonitor() {
+ return ChangeLocalMonitor(false);
+}
+
+bool GipsVoiceEngine::ResumeLocalMonitor() {
+ return ChangeLocalMonitor(desired_local_monitor_enable_);
+}
+
+const std::vector<AudioCodec>& GipsVoiceEngine::codecs() {
+ return codecs_;
+}
+
+bool GipsVoiceEngine::FindCodec(const AudioCodec& in) {
+ return FindGIPSCodec(in, NULL);
+}
+
+bool GipsVoiceEngine::FindGIPSCodec(const AudioCodec& in,
+ GIPS_CodecInst* out) {
+ int ncodecs = gips_->codec()->GIPSVE_NumOfCodecs();
+ for (int i = 0; i < ncodecs; ++i) {
+ GIPS_CodecInst gcodec;
+ if (gips_->codec()->GIPSVE_GetCodec(i, gcodec) >= 0) {
+ AudioCodec codec(gcodec.pltype, gcodec.plname,
+ gcodec.plfreq, gcodec.rate, gcodec.channels, 0);
+ if (codec.Matches(in)) {
+ if (out) {
+ // If the codec is VBR and an explicit rate is specified, use it.
+ if (in.bitrate != 0 && gcodec.rate == -1) {
+ gcodec.rate = in.bitrate;
+ }
+ *out = gcodec;
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void GipsVoiceEngine::SetLogging(int min_sev, const char* filter) {
+ // if min_sev == -1, we keep the current log level.
+ if (min_sev >= 0) {
+ log_level_ = min_sev;
+ }
+ log_filter_ = filter;
+ ApplyLogging(initialized_ ? log_filter_ : "");
+}
+
+int GipsVoiceEngine::GetLastGipsError() {
+ return gips_->error();
+}
+
+// We support three different logging settings for GIPS:
+// 1. Observer callback that goes into talk diagnostic logfile.
+// Use --logfile and --loglevel
+//
+// 2. Encrypted GIPS log for debugging VoiceEngine.
+// Use --voice_loglevel --voice_logfilter "tracefile file_name"
+//
+// 3. EC log and dump for debugging QualityEngine.
+// Use --voice_loglevel --voice_logfilter "recordEC file_name"
+//
+// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
+// Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
+void GipsVoiceEngine::ApplyLogging(const std::string& log_filter) {
+ // Set log level.
+ int filter = 0;
+ switch (log_level_) {
+ case talk_base::LS_VERBOSE:
+ filter |= GIPS::TR_ALL; // fall through
+ case talk_base::LS_INFO:
+ filter |= GIPS::TR_STATE_INFO; // fall through
+ case talk_base::LS_WARNING:
+ filter |= (GIPS::TR_INFO | GIPS::TR_WARNING); // fall through
+ case talk_base::LS_ERROR:
+ filter |= (GIPS::TR_ERROR | GIPS::TR_CRITICAL);
+ }
+ tracing_->SetTraceFilter(filter);
+
+ // Set encrypted trace file.
+ std::vector<std::string> opts;
+ talk_base::tokenize(log_filter, ' ', '"', '"', &opts);
+ std::vector<std::string>::iterator tracefile =
+ std::find(opts.begin(), opts.end(), "tracefile");
+ if (tracefile != opts.end() && ++tracefile != opts.end()) {
+ // Write encrypted debug output (at same loglevel) to file
+ if (tracing_->SetEncryptedTraceFile(tracefile->c_str()) == -1) {
+ LOG_GIPSERR1(SetEncryptedTraceFileName, *tracefile);
+ }
+ }
+
+ // Set AEC dump file
+ std::vector<std::string>::iterator recordEC =
+ std::find(opts.begin(), opts.end(), "recordEC");
+ if (recordEC != opts.end()) {
+ ++recordEC;
+ if (recordEC != opts.end() && !is_dumping_aec_) {
+ // Start dumping AEC when we are not dumping and recordEC has a filename.
+ if (gips_->vqe()->GIPSVE_StartDebugRecording(recordEC->c_str()) == -1) {
+ LOG_GIPSERR0(StartDebugRecording);
+ } else {
+ is_dumping_aec_ = true;
+ }
+ } else if (recordEC == opts.end() && is_dumping_aec_) {
+      // Stop dumping AEC when we are dumping and recordEC has no filename.
+ if (gips_->vqe()->GIPSVE_StopDebugRecording() == -1) {
+ LOG_GIPSERR0(StopDebugRecording);
+ }
+ is_dumping_aec_ = false;
+ }
+ }
+}
+
+// Filters out known-noisy GIPS trace lines, mostly produced by the stats
+// API before any RTCP information has arrived from the remote side.
+// Returns true if |trace| starts with one of the known spammy prefixes.
+static bool ShouldIgnoreTrace(const std::string& trace) {
+  static const char* kTracesToIgnore[] = {
+    "\tfailed to GetReportBlockInformation",
+    "GetRecCodec() failed to get received codec",
+    "GetRemoteRTCPData() failed to retrieve sender info for remote side",
+    "GetRTPStatistics() failed to measure RTT since no RTP packets have been received yet",  // NOLINT
+    "GetRTPStatistics() failed to read RTP statistics from the RTP/RTCP module",
+    "GetRTPStatistics() failed to retrieve RTT from the RTP/RTCP module",
+    "RTCPReceiver::SenderInfoReceived No received SR",
+    "StatisticsRTP() no statisitics availble",
+    NULL
+  };
+  for (size_t i = 0; kTracesToIgnore[i] != NULL; ++i) {
+    // find() == 0 means the prefix matches at the start of the trace.
+    if (trace.find(kTracesToIgnore[i]) == 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Trace callback from the GIPS engine: maps the GIPS trace level onto our
+// logging severity and forwards the message to the talk_base log, dropping
+// known-spammy traces.
+void GipsVoiceEngine::Print(const GIPS::TraceLevel level,
+                            const char* trace, const int length) {
+  talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
+  if (level == GIPS::TR_ERROR || level == GIPS::TR_CRITICAL)
+    sev = talk_base::LS_ERROR;
+  else if (level == GIPS::TR_WARNING)
+    sev = talk_base::LS_WARNING;
+  else if (level == GIPS::TR_STATE_INFO || level == GIPS::TR_INFO)
+    sev = talk_base::LS_INFO;
+
+  if (sev >= log_level_) {
+    // Skip past gips boilerplate prefix text. The magic numbers assume a
+    // fixed 71-character GIPS header plus a trailing terminator -- TODO
+    // confirm against the GIPS trace line format.
+    if (length < 72) {
+      std::string msg(trace, length);
+      LOG(LS_ERROR) << "Malformed GIPS log message: ";
+      LOG_V(sev) << msg;
+    } else {
+      std::string msg(trace + 71, length - 72);
+      if (!ShouldIgnoreTrace(msg)) {
+        LOG_V(sev) << "GIPS_VE: " << msg;
+      }
+    }
+  }
+}
+
+void GipsVoiceEngine::CallbackOnError(const int channel_num,
+ const int err_code) {
+ talk_base::CritScope lock(&channels_cs_);
+ GipsVoiceMediaChannel* channel = NULL;
+ uint32 ssrc = 0;
+ LOG(LS_WARNING) << "GIPS error " << err_code << " reported on channel "
+ << channel_num << ".";
+ if (FindChannelAndSsrc(channel_num, &channel, &ssrc)) {
+ ASSERT(channel != NULL);
+ channel->OnError(ssrc, err_code);
+ } else {
+ LOG(LS_ERROR) << "GIPS channel " << channel_num
+ << " could not be found in the channel list when error reported.";
+ }
+}
+
+// Returns a descending preference rank for the codec (|name|, |clockrate|):
+// entries earlier in kCodecPrefs map to larger values. Returns -1 (and logs
+// a warning) if the codec is not in the preference table.
+int GipsVoiceEngine::GetCodecPreference(const char *name, int clockrate) {
+  for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
+    const bool name_matches = (_stricmp(kCodecPrefs[i].name, name) == 0);
+    if (name_matches && kCodecPrefs[i].clockrate == clockrate) {
+      return ARRAY_SIZE(kCodecPrefs) - i;
+    }
+  }
+  LOG(LS_WARNING) << "Unexpected codec \"" << name << "/" << clockrate << "\"";
+  return -1;
+}
+
+// Searches the registered media channels for the one owning GIPS channel
+// |channel_num|. On success returns true with |channel| and |ssrc| filled
+// in; otherwise both outputs are reset and false is returned.
+bool GipsVoiceEngine::FindChannelAndSsrc(
+    int channel_num, GipsVoiceMediaChannel** channel, uint32* ssrc) const {
+  ASSERT(channel != NULL && ssrc != NULL);
+
+  *channel = NULL;
+  *ssrc = 0;
+  ChannelList::const_iterator it = channels_.begin();
+  for (; it != channels_.end(); ++it) {
+    GipsVoiceMediaChannel* candidate = *it;
+    ASSERT(candidate != NULL);
+    // FindSsrc() reports whether this media channel owns |channel_num|.
+    if (candidate->FindSsrc(channel_num, ssrc)) {
+      *channel = candidate;
+      return true;
+    }
+  }
+  return false;
+}
+
+// Adds |channel| to the engine's list of active media channels. Guarded by
+// channels_cs_ because the GIPS error callback iterates this list.
+void GipsVoiceEngine::RegisterChannel(GipsVoiceMediaChannel *channel) {
+  talk_base::CritScope lock(&channels_cs_);
+  channels_.push_back(channel);
+}
+
+// Removes |channel| from the engine's list of active media channels, if it
+// is present. Guarded by channels_cs_ like RegisterChannel().
+void GipsVoiceEngine::UnregisterChannel(GipsVoiceMediaChannel *channel) {
+  talk_base::CritScope lock(&channels_cs_);
+  ChannelList::iterator it =
+      std::find(channels_.begin(), channels_.end(), channel);
+  if (it != channels_.end()) {
+    channels_.erase(it);
+  }
+}
+
+// Adds |soundclip| to the list of registered soundclip media objects.
+// NOTE(review): unlike RegisterChannel, no lock is taken here; presumably
+// soundclips are only manipulated from a single thread -- confirm.
+void GipsVoiceEngine::RegisterSoundclip(GipsSoundclipMedia *soundclip) {
+  soundclips_.push_back(soundclip);
+}
+
+// Removes |soundclip| from the registered soundclip list, if present.
+void GipsVoiceEngine::UnregisterSoundclip(GipsSoundclipMedia *soundclip) {
+  SoundclipList::iterator it =
+      std::find(soundclips_.begin(), soundclips_.end(), soundclip);
+  if (it != soundclips_.end()) {
+    soundclips_.erase(it);
+  }
+}
+
+// Adjusts the default AGC target level by the specified delta.
+// A delta of 0 restores the default configuration.
+// NB: If we start messing with other config fields, we'll want
+// to save the current GIPS_AGC_config as well.
+bool GipsVoiceEngine::AdjustAgcLevel(int delta) {
+  GIPS_AGC_config config = default_agc_config_;
+  // targetLeveldBOv is logged with a leading '-', i.e. dB below overload;
+  // a positive delta therefore lowers the target level.
+  config.targetLeveldBOv += delta;
+
+  LOG(LS_INFO) << "Adjusting AGC level from default -"
+               << default_agc_config_.targetLeveldBOv << "dB to -"
+               << config.targetLeveldBOv << "dB";
+
+  if (gips_->vqe()->GIPSVE_SetAGCConfig(config) == -1) {
+    LOG_GIPSERR1(SetAGCConfig, config.targetLeveldBOv);
+    return false;
+  }
+  return true;
+}
+
+// Configures echo cancellation and noise suppression modes according to
+// whether or not we are in a multi-point conference. Returns true on
+// success.
+bool GipsVoiceEngine::SetConferenceMode(bool enable) {
+// Only use EC_AECM for mobile.
+#if defined(IOS) || defined(ANDROID)
+  // Mobile builds keep their fixed EC/NS configuration; treat as a no-op.
+  return true;
+#endif
+
+  LOG(LS_INFO) << (enable ? "Enabling" : "Disabling")
+               << " Conference Mode noise reduction";
+
+  // We always configure noise suppression on, so just toggle the mode.
+  const GIPS_NSModes ns_mode = enable ? GIPS_NS_CONFERENCE : GIPS_NS_DEFAULT;
+  if (gips_->vqe()->GIPSVE_SetNSStatus(true, ns_mode) == -1) {
+    LOG_GIPSERR2(SetNSStatus, true, ns_mode);
+    return false;
+  }
+
+  // Echo-cancellation is a user-option, so preserve the enable state and
+  // just toggle the mode.
+  bool aec;
+  GIPS_ECModes ec_mode;
+  GIPS_AESModes aes_mode;
+  int aes_attn;
+  if (gips_->vqe()->GIPSVE_GetECStatus(aec, ec_mode,
+                                       aes_mode, aes_attn) == -1) {
+    LOG_GIPSERR0(GetECStatus);
+    return false;
+  }
+  ec_mode = enable ? GIPS_EC_CONFERENCE : GIPS_EC_DEFAULT;
+  if (gips_->vqe()->GIPSVE_SetECStatus(aec, ec_mode) == -1) {
+    // Fixed: this failure was previously logged as GetECStatus, but the
+    // failing call is SetECStatus.
+    LOG_GIPSERR2(SetECStatus, aec, ec_mode);
+    return false;
+  }
+  return true;
+}
+
+// Stub: media processor registration is not implemented for this engine.
+// Always reports success so callers can treat it as a no-op.
+bool GipsVoiceEngine::RegisterProcessor(
+    VoiceProcessor* voice_processor,
+    MediaProcessorDirection direction) {
+  // stubbing out...
+  return true;
+}
+
+// Stub: media processor unregistration is not implemented for this engine.
+// Always reports success so callers can treat it as a no-op.
+bool GipsVoiceEngine::UnregisterProcessor(
+    VoiceProcessor* voice_processor,
+    MediaProcessorDirection direction) {
+  // stubbing out...
+  return true;
+}
+
+// GipsVoiceMediaChannel
+// GipsVoiceMediaChannel
+// Creates the primary GIPS channel for this media channel, registers with
+// the engine, wires up external transport, enables RTCP, and picks a random
+// send SSRC. On mobile it additionally configures receive-side AGC.
+GipsVoiceMediaChannel::GipsVoiceMediaChannel(GipsVoiceEngine *engine)
+    : GipsMediaChannel<VoiceMediaChannel, GipsVoiceEngine>(
+          engine,
+          engine->gips()->base()->GIPSVE_CreateChannel()),
+      channel_options_(0),
+      agc_adjusted_(false),
+      dtmf_allowed_(false),
+      desired_playout_(false),
+      playout_(false),
+      desired_send_(SEND_NOTHING),
+      send_(SEND_NOTHING) {
+  engine->RegisterChannel(this);
+  LOG(LS_VERBOSE) << "GipsVoiceMediaChannel::GipsVoiceMediaChannel "
+                  << gips_channel();
+
+  // Register external transport: we send/receive RTP through this object
+  // rather than letting GIPS use sockets directly.
+  if (engine->gips()->network()->GIPSVE_RegisterExternalTransport(
+      gips_channel(), *static_cast<GIPS_transport*>(this)) == -1) {
+    LOG_GIPSERR2(RegisterExternalTransport, gips_channel(), this);
+  }
+
+  // Enable RTCP (for quality stats and feedback messages)
+  EnableRtcp(gips_channel());
+
+  // Create a random but nonzero send SSRC
+  SetSendSsrc(talk_base::CreateRandomNonZeroId());
+
+#if defined(IOS) || defined(ANDROID)
+  // Turn on and configure receiving-end auto gain control
+  if (engine->gips()->vqe()->GIPSVE_SetRxAGCStatus(
+      gips_channel(), true, GIPS_AGC_FIXED_DIGITAL) != 0) {
+    LOG(LS_ERROR) << "Failed to set Rx AGC status";
+  }
+
+  // These settings were found to work well on mobile.
+  GIPS_AGC_config config;
+  config.targetLeveldBOv = 6;
+  config.digitalCompressionGaindB = 0;
+  config.limiterEnable = true;
+
+  if (engine->gips()->vqe()->GIPSVE_SetRxAGCConfig(gips_channel(),
+                                                   config) != 0) {
+    LOG(LS_ERROR) << "Failed to set Rx AGC config for channel "
+                  << gips_channel();
+  }
+#endif  // IOS || ANDROID
+}
+
+// Tears down in the reverse order of construction: deregister transport,
+// unregister from the engine, remove all mux streams, then delete the
+// primary GIPS channel.
+GipsVoiceMediaChannel::~GipsVoiceMediaChannel() {
+  LOG(LS_VERBOSE) << "GipsVoiceMediaChannel::~GipsVoiceMediaChannel "
+                  << gips_channel();
+
+  // DeRegister external transport
+  if (engine()->gips()->network()->GIPSVE_DeRegisterExternalTransport(
+      gips_channel()) == -1) {
+    LOG_GIPSERR1(DeRegisterExternalTransport, gips_channel());
+  }
+
+  // Unregister ourselves from the engine.
+  engine()->UnregisterChannel(this);
+  // Remove any remaining streams. RemoveStream erases from mux_channels_,
+  // so loop until the map is empty.
+  while (!mux_channels_.empty()) {
+    RemoveStream(mux_channels_.begin()->first);
+  }
+  // Delete the primary channel.
+  if (engine()->gips()->base()->GIPSVE_DeleteChannel(gips_channel()) == -1) {
+    LOG_GIPSERR1(DeleteChannel, gips_channel());
+  }
+}
+
+// Stores the channel option |flags|, interpreted later (e.g. by
+// ChangeSend). Changing options is only allowed while not sending; setting
+// the current value is always accepted.
+bool GipsVoiceMediaChannel::SetOptions(int flags) {
+  if (flags == channel_options_) {
+    return true;  // No change requested.
+  }
+  if (send_ != SEND_NOTHING) {
+    return false;  // Options are locked while sending.
+  }
+  channel_options_ = flags;
+  return true;
+}
+
+// Update our receive payload types to match what we offered. This only is
+// an issue when a different entity (i.e. a server) is generating the offer
+// for us. Stops and returns false at the first unknown codec or GIPS
+// failure.
+bool GipsVoiceMediaChannel::SetRecvCodecs(
+    const std::vector<AudioCodec>& codecs) {
+  bool ret = true;
+  std::vector<AudioCodec>::const_iterator it;
+  for (it = codecs.begin(); it != codecs.end(); ++it) {
+    GIPS_CodecInst gcodec;
+    if (!engine()->FindGIPSCodec(*it, &gcodec)) {
+      LOG(LS_WARNING) << "Unknown codec " << it->name;
+      ret = false;
+      break;
+    }
+    if (gcodec.pltype == it->id) {
+      continue;  // Payload type already matches the offer.
+    }
+    LOG(LS_INFO) << "Updating payload type for " << gcodec.plname
+                 << " from " << gcodec.pltype << " to " << it->id;
+    gcodec.pltype = it->id;
+    if (engine()->gips()->codec()->GIPSVE_SetRecPayloadType(
+        gips_channel(), gcodec) == -1) {
+      LOG_GIPSERR1(SetRecPayloadType, gips_channel());
+      ret = false;
+      break;
+    }
+  }
+  return ret;
+}
+
+// Selects the send codec and configures DTMF, VAD/CN, and FEC ("red") from
+// the negotiated |codecs| list. The first usable codec in the list becomes
+// the send codec; an empty list falls back to PCMU/8000.
+bool GipsVoiceMediaChannel::SetSendCodecs(
+    const std::vector<AudioCodec>& codecs) {
+  // Disable DTMF, VAD, and FEC unless we know the other side wants them.
+  dtmf_allowed_ = false;
+  engine()->gips()->codec()->GIPSVE_SetVADStatus(gips_channel(), false);
+  engine()->gips()->rtp()->GIPSVE_SetFECStatus(gips_channel(), false);
+
+  // Scan through the list to figure out the codec to use for sending, along
+  // with the proper configuration for VAD and DTMF.
+  bool first = true;
+  GIPS_CodecInst send_codec;
+  memset(&send_codec, 0, sizeof(send_codec));
+
+  for (std::vector<AudioCodec>::const_iterator i = codecs.begin();
+       i != codecs.end(); ++i) {
+    // Ignore codecs we don't know about. The negotiation step should prevent
+    // this, but double-check to be sure.
+    GIPS_CodecInst gcodec;
+    if (!engine()->FindGIPSCodec(*i, &gcodec)) {
+      LOG(LS_WARNING) << "Unknown codec " << i->name;
+      continue;
+    }
+
+    // Find the DTMF telephone event "codec" and tell gips about it.
+    if (_stricmp(i->name.c_str(), "telephone-event") == 0 ||
+        _stricmp(i->name.c_str(), "audio/telephone-event") == 0) {
+      engine()->gips()->dtmf()->GIPSVE_SetSendTelephoneEventPayloadType(
+          gips_channel(), i->id);
+      dtmf_allowed_ = true;
+    }
+
+    // Turn voice activity detection/comfort noise on if supported.
+    // Set the wideband CN payload type appropriately
+    // (narrowband always uses the static payload type 13).
+    if (_stricmp(i->name.c_str(), "CN") == 0) {
+      GIPS_PayloadFrequencies cn_freq;
+      switch (i->clockrate) {
+        case 8000:
+          cn_freq = GIPS_FREQ_8000_HZ;
+          break;
+        case 16000:
+          cn_freq = GIPS_FREQ_16000_HZ;
+          break;
+        case 32000:
+          cn_freq = GIPS_FREQ_32000_HZ;
+          break;
+        default:
+          LOG(LS_WARNING) << "CN frequency " << i->clockrate
+                          << " not supported.";
+          continue;
+      }
+      engine()->gips()->codec()->GIPSVE_SetVADStatus(gips_channel(), true);
+      if (cn_freq != GIPS_FREQ_8000_HZ) {
+        engine()->gips()->codec()->GIPSVE_SetSendCNPayloadType(gips_channel(),
+                                                               i->id, cn_freq);
+      }
+    }
+
+    // We'll use the first codec in the list to actually send audio data.
+    // Be sure to use the payload type requested by the remote side.
+    // "red", for FEC audio, is a special case where the actual codec to be
+    // used is specified in params.
+    if (first) {
+      if (_stricmp(i->name.c_str(), "red") == 0) {
+        // Parse out the RED parameters. If we fail, just ignore RED;
+        // we don't support all possible params/usage scenarios.
+        if (!GetRedSendCodec(*i, codecs, &send_codec)) {
+          continue;
+        }
+
+        // Enable redundant encoding of the specified codec. Treat any
+        // failure as a fatal internal error.
+        LOG(LS_INFO) << "Enabling RED";
+        if (engine()->gips()->rtp()->GIPSVE_SetFECStatus(gips_channel(),
+                                                         true, i->id) == -1) {
+          LOG_GIPSERR3(SetFECStatus, gips_channel(), true, i->id);
+          return false;
+        }
+      } else {
+        send_codec = gcodec;
+        send_codec.pltype = i->id;
+      }
+      first = false;
+    }
+  }
+
+  // If we're being asked to set an empty list of codecs, due to a buggy client,
+  // choose the most common format: PCMU
+  if (first) {
+    LOG(LS_WARNING) << "Received empty list of codecs; using PCMU/8000";
+    AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
+    engine()->FindGIPSCodec(codec, &send_codec);
+  }
+
+  // Set the codec.
+  LOG(LS_INFO) << "Selected voice codec " << send_codec.plname
+               << "/" << send_codec.plfreq;
+  if (engine()->gips()->codec()->GIPSVE_SetSendCodec(gips_channel(),
+                                                     send_codec) == -1) {
+    LOG_GIPSERR1(SetSendCodec, gips_channel());
+    return false;
+  }
+
+  return true;
+}
+
+// Accepts any set of receive-side RTP header extensions without acting on
+// them, since none are supported on the receive path yet.
+bool GipsVoiceMediaChannel::SetRecvRtpHeaderExtensions(
+    const std::vector<RtpHeaderExtension>& extensions) {
+  // We don't support any incoming extensions headers right now.
+  return true;
+}
+
+// Enables or disables the RTP audio-level indication header extension on
+// the send path, based on whether |extensions| contains its URI. Any other
+// extensions are ignored. Returns false for an out-of-range extension id or
+// a GIPS failure.
+bool GipsVoiceMediaChannel::SetSendRtpHeaderExtensions(
+    const std::vector<RtpHeaderExtension>& extensions) {
+  // Enable the audio level extension header if requested.
+  std::vector<RtpHeaderExtension>::const_iterator it;
+  for (it = extensions.begin(); it != extensions.end(); ++it) {
+    if (it->uri == kRtpAudioLevelHeaderExtension) {
+      break;
+    }
+  }
+
+  bool enable = (it != extensions.end());
+  int id = 0;
+
+  if (enable) {
+    id = it->id;
+    if (id < kMinRtpHeaderExtensionId ||
+        id > kMaxRtpHeaderExtensionId) {
+      LOG(LS_WARNING) << "Invalid RTP header extension id " << id;
+      return false;
+    }
+  }
+
+// This api call is not available in iOS version of gips currently.
+#if !defined(IOS) && !defined(ANDROID)
+  if (engine()->gips()->rtp()->GIPSVE_SetRTPAudioLevelIndicationStatus(
+      gips_channel(), enable, id) == -1) {
+    LOG_GIPSERR3(SetRTPAudioLevelIndicationStatus, gips_channel(), enable, id);
+    return false;
+  }
+#endif
+
+  return true;
+}
+
+// Records the desired playout state and applies it. The desired state is
+// remembered so ResumePlayout() can restore it after PausePlayout().
+bool GipsVoiceMediaChannel::SetPlayout(bool playout) {
+  desired_playout_ = playout;
+  return ChangePlayout(desired_playout_);
+}
+
+// Temporarily stops playout without altering desired_playout_.
+bool GipsVoiceMediaChannel::PausePlayout() {
+  return ChangePlayout(false);
+}
+
+// Restores playout to the state last requested via SetPlayout().
+bool GipsVoiceMediaChannel::ResumePlayout() {
+  return ChangePlayout(desired_playout_);
+}
+
+// Applies the playout state to the default channel (only when no mux
+// streams exist) and to every mux channel. playout_ is updated only if all
+// channels succeed.
+bool GipsVoiceMediaChannel::ChangePlayout(bool playout) {
+  if (playout_ == playout) {
+    return true;
+  }
+
+  bool result = true;
+  if (mux_channels_.empty()) {
+    // Only toggle the default channel if we don't have any other channels.
+    // NOTE(review): a failure here is not logged, unlike the loop below.
+    result = SetPlayout(gips_channel(), playout);
+  }
+  // The loop condition includes |result|, so we stop at the first failure.
+  for (ChannelMap::iterator it = mux_channels_.begin();
+       it != mux_channels_.end() && result; ++it) {
+    if (!SetPlayout(it->second, playout)) {
+      LOG(LS_ERROR) << "SetPlayout " << playout << " on channel " << it->second
+                    << " failed";
+      result = false;
+    }
+  }
+
+  if (result) {
+    playout_ = playout;
+  }
+  return result;
+}
+
+// Records the desired send state and applies it. The desired state is
+// remembered so ResumeSend() can restore it after PauseSend().
+bool GipsVoiceMediaChannel::SetSend(SendFlags send) {
+  desired_send_ = send;
+  return ChangeSend(desired_send_);
+}
+
+// Temporarily stops sending without altering desired_send_.
+bool GipsVoiceMediaChannel::PauseSend() {
+  return ChangeSend(SEND_NOTHING);
+}
+
+// Restores sending to the state last requested via SetSend().
+bool GipsVoiceMediaChannel::ResumeSend() {
+  return ChangeSend(desired_send_);
+}
+
+// Transitions the channel between sending microphone audio, sending a
+// ringback tone as the microphone, and not sending. Also manages the
+// conference-mode/AGC side effects of starting and stopping a send.
+// NOTE(review): the sequence-number-restore + StartSend stanza is duplicated
+// between the SEND_MICROPHONE and SEND_RINGBACKTONE branches; a shared
+// helper would require a header change, so it is only noted here.
+bool GipsVoiceMediaChannel::ChangeSend(SendFlags send) {
+  if (send_ == send) {
+    return true;
+  }
+
+  if (send == SEND_MICROPHONE) {
+#ifdef CHROMEOS
+    // Conference mode doesn't work well on ChromeOS.
+    if (!engine()->SetConferenceMode(false)) {
+      LOG_GIPSERR1(SetConferenceMode, gips_channel());
+      return false;
+    }
+#else
+    // Multi-point conferences use conference-mode noise filtering.
+    if (!engine()->SetConferenceMode(
+        0 != (channel_options_ & OPT_CONFERENCE))) {
+      LOG_GIPSERR1(SetConferenceMode, gips_channel());
+      return false;
+    }
+#endif  // CHROMEOS
+
+    // Tandberg-bridged conferences have an AGC target that is lower than
+    // GTV-only levels.
+    if ((channel_options_ & OPT_AGC_TANDBERG_LEVELS) && !agc_adjusted_) {
+      if (engine()->AdjustAgcLevel(kTandbergDbAdjustment)) {
+        agc_adjusted_ = true;
+      }
+    }
+
+    // GIPS resets sequence number when StopSend is called. This
+    // sometimes causes libSRTP to complain about packets being
+    // replayed. To get around this we store the last sent sequence
+    // number and initializes the channel with the next to continue on
+    // the same sequence.
+    if (sequence_number() != -1) {
+      LOG(LS_INFO) << "GipsVoiceMediaChannel restores seqnum="
+                   << sequence_number() + 1;
+      if (engine()->gips()->sync()->GIPSVE_SetInitSequenceNumber(
+          gips_channel(), sequence_number() + 1) == -1) {
+        LOG_GIPSERR2(SetInitSequenceNumber, gips_channel(),
+                     sequence_number() + 1);
+      }
+    }
+    if (engine()->gips()->base()->GIPSVE_StartSend(gips_channel()) == -1) {
+      LOG_GIPSERR1(StartSend, gips_channel());
+      return false;
+    }
+    // Make sure any leftover ringback "microphone" file is stopped.
+    if (engine()->gips()->file()->GIPSVE_StopPlayingFileAsMicrophone(
+        gips_channel()) == -1) {
+      LOG_GIPSERR1(StopPlayingFileAsMicrophone, gips_channel());
+      return false;
+    }
+  } else if (send == SEND_RINGBACKTONE) {
+    ASSERT(ringback_tone_.get() != NULL);
+    if (!ringback_tone_.get()) {
+      return false;
+    }
+    if (engine()->gips()->file()->GIPSVE_StartPlayingFileAsMicrophone(
+        gips_channel(), ringback_tone_.get(), false) == -1) {
+      LOG_GIPSERR3(StartPlayingFileAsMicrophone, gips_channel(),
+                   ringback_tone_.get(), false);
+      return false;
+    }
+    // GIPS resets sequence number when StopSend is called. This
+    // sometimes causes libSRTP to complain about packets being
+    // replayed. To get around this we store the last sent sequence
+    // number and initializes the channel with the next to continue on
+    // the same sequence.
+    if (sequence_number() != -1) {
+      LOG(LS_INFO) << "GipsVoiceMediaChannel restores seqnum="
+                   << sequence_number() + 1;
+      if (engine()->gips()->sync()->GIPSVE_SetInitSequenceNumber(
+          gips_channel(), sequence_number() + 1) == -1) {
+        LOG_GIPSERR2(SetInitSequenceNumber, gips_channel(),
+                     sequence_number() + 1);
+      }
+    }
+    if (engine()->gips()->base()->GIPSVE_StartSend(gips_channel()) == -1) {
+      LOG_GIPSERR1(StartSend, gips_channel());
+      return false;
+    }
+  } else {  // SEND_NOTHING
+    if (engine()->gips()->base()->GIPSVE_StopSend(gips_channel()) == -1) {
+      LOG_GIPSERR1(StopSend, gips_channel());
+    }
+
+    // Reset the AGC level, if it was set.
+    if (agc_adjusted_) {
+      if (engine()->AdjustAgcLevel(0)) {
+        agc_adjusted_ = false;
+      }
+    }
+
+    // Disable conference-mode noise filtering.
+    if (!engine()->SetConferenceMode(false)) {
+      LOG_GIPSERR1(SetConferenceMode, gips_channel());
+    }
+  }
+  send_ = send;
+  return true;
+}
+
+// Creates a dedicated GIPS channel for receiving audio on |ssrc|, wires it
+// to our external transport, and mirrors the default channel's local SSRC
+// so RTCP reports stay correct. Returns false if the stream already exists
+// or any GIPS call fails; the new channel is cleaned up on failure.
+bool GipsVoiceMediaChannel::AddStream(uint32 ssrc) {
+  talk_base::CritScope lock(&mux_channels_cs_);
+
+  if (mux_channels_.find(ssrc) != mux_channels_.end()) {
+    return false;
+  }
+
+  // Create a new channel for receiving audio data.
+  int channel = engine()->gips()->base()->GIPSVE_CreateChannel();
+  if (channel == -1) {
+    LOG_GIPSERR0(CreateChannel);
+    return false;
+  }
+
+  // Configure to use external transport, like our default channel.
+  if (engine()->gips()->network()->GIPSVE_RegisterExternalTransport(
+      channel, *this) == -1) {
+    // Fixed: previously logged as SetExternalTransport.
+    LOG_GIPSERR2(RegisterExternalTransport, channel, this);
+    // Fixed: don't leak the freshly created channel on failure.
+    engine()->gips()->base()->GIPSVE_DeleteChannel(channel);
+    return false;
+  }
+
+  // Use the same SSRC as our default channel (so the RTCP reports are correct).
+  unsigned int send_ssrc;
+  GIPSVERTP_RTCP* rtp = engine()->gips()->rtp();
+  if (rtp->GIPSVE_GetLocalSSRC(gips_channel(), send_ssrc) == -1) {
+    // Fixed: the failing call was on gips_channel(), not |channel|.
+    LOG_GIPSERR2(GetSendSSRC, gips_channel(), send_ssrc);
+    engine()->gips()->network()->GIPSVE_DeRegisterExternalTransport(channel);
+    engine()->gips()->base()->GIPSVE_DeleteChannel(channel);
+    return false;
+  }
+  if (rtp->GIPSVE_SetLocalSSRC(channel, send_ssrc) == -1) {
+    LOG_GIPSERR2(SetSendSSRC, channel, send_ssrc);
+    engine()->gips()->network()->GIPSVE_DeRegisterExternalTransport(channel);
+    engine()->gips()->base()->GIPSVE_DeleteChannel(channel);
+    return false;
+  }
+
+  if (mux_channels_.empty() && playout_) {
+    // This is the first stream in a multi user meeting. We can now
+    // disable playback of the default stream. This since the default
+    // stream will probably have received some initial packets before
+    // the new stream was added. This will mean that the CN state from
+    // the default channel will be mixed in with the other streams
+    // throughout the whole meeting, which might be disturbing.
+    LOG(LS_INFO) << "Disabling playback on the default voice channel";
+    SetPlayout(gips_channel(), false);
+  }
+
+  mux_channels_[ssrc] = channel;
+
+  // TODO(juberti): We should rollback the add if SetPlayout fails.
+  LOG(LS_INFO) << "New audio stream " << ssrc << " registered to channel "
+               << channel << ".";
+  return SetPlayout(channel, playout_);
+}
+
+// Tears down the GIPS channel associated with |ssrc|, if any: deregisters
+// its transport, deletes it, and re-enables default-channel playout when
+// the last mux stream goes away. Removing an unknown ssrc is a no-op that
+// returns true.
+bool GipsVoiceMediaChannel::RemoveStream(uint32 ssrc) {
+  talk_base::CritScope lock(&mux_channels_cs_);
+  ChannelMap::iterator it = mux_channels_.find(ssrc);
+
+  if (it != mux_channels_.end()) {
+    if (engine()->gips()->network()->GIPSVE_DeRegisterExternalTransport(
+        it->second) == -1) {
+      LOG_GIPSERR1(DeRegisterExternalTransport, it->second);
+    }
+
+    LOG(LS_INFO) << "Removing audio stream " << ssrc << " with channel "
+                 << it->second << ".";
+    if (engine()->gips()->base()->GIPSVE_DeleteChannel(it->second) == -1) {
+      // Fixed: log the channel being deleted, not the default channel.
+      LOG_GIPSERR1(DeleteChannel, it->second);
+      return false;
+    }
+
+    mux_channels_.erase(it);
+    if (mux_channels_.empty() && playout_) {
+      // The last stream was removed. We can now enable the default
+      // channel for new channels to be played out immediately without
+      // waiting for AddStream messages.
+      // TODO(oja): Does the default channel still have it's CN state?
+      LOG(LS_INFO) << "Enabling playback on the default voice channel";
+      SetPlayout(gips_channel(), true);
+    }
+  }
+  return true;
+}
+
+// Fills |actives| with (ssrc, level) pairs for every mux stream currently
+// producing nonzero output. Always returns true.
+bool GipsVoiceMediaChannel::GetActiveStreams(AudioInfo::StreamList* actives) {
+  // Fixed: take mux_channels_cs_ while iterating, for consistency with
+  // AddStream/RemoveStream which mutate the map under this lock.
+  talk_base::CritScope lock(&mux_channels_cs_);
+  actives->clear();
+  for (ChannelMap::iterator it = mux_channels_.begin();
+       it != mux_channels_.end(); ++it) {
+    int level = GetOutputLevel(it->second);
+    if (level > 0) {
+      actives->push_back(std::make_pair(it->first, level));
+    }
+  }
+  return true;
+}
+
+// Returns the highest output level across the default channel and all mux
+// streams.
+int GipsVoiceMediaChannel::GetOutputLevel() {
+  // Fixed: take mux_channels_cs_ while iterating, for consistency with
+  // AddStream/RemoveStream which mutate the map under this lock.
+  talk_base::CritScope lock(&mux_channels_cs_);
+  // return the highest output level of all streams
+  int highest = GetOutputLevel(gips_channel());
+  for (ChannelMap::iterator it = mux_channels_.begin();
+       it != mux_channels_.end(); ++it) {
+    int level = GetOutputLevel(it->second);
+    highest = talk_base::_max(level, highest);
+  }
+  return highest;
+}
+
+// Sets the output volume and stereo pan for one stream (|ssrc| != 0) or for
+// all streams including the default channel (|ssrc| == 0). The overall
+// volume is max(left, right); left/right are then normalized by that scale
+// and applied as pan.
+bool GipsVoiceMediaChannel::SetOutputScaling(
+    uint32 ssrc, double left, double right) {
+  talk_base::CritScope lock(&mux_channels_cs_);
+  // Collect the channels to scale the output volume.
+  std::vector<int> channels;
+  if (0 == ssrc) {  // Collect all channels, including the default one.
+    channels.push_back(gips_channel());
+    for (ChannelMap::const_iterator it = mux_channels_.begin();
+         it != mux_channels_.end(); ++it) {
+      channels.push_back(it->second);
+    }
+  } else {  // Collect only the channel of the specified ssrc.
+    int channel = GetChannel(ssrc);
+    if (-1 == channel) {
+      LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
+      return false;
+    }
+    channels.push_back(channel);
+  }
+
+  // Scale the output volume for the collected channels. We first set the
+  // scaling and then set the pan. The epsilon guard avoids dividing by a
+  // (near-)zero scale when both sides are muted.
+  float scale = static_cast<float>(talk_base::_max(left, right));
+  if (scale > 0.0001f) {
+    left /= scale;
+    right /= scale;
+  }
+  for (std::vector<int>::const_iterator it = channels.begin();
+       it != channels.end(); ++it) {
+    if (-1 == engine()->gips()->volume()->GIPSVE_SetChannelOutputVolumeScaling(
+        *it, scale)) {
+      LOG_GIPSERR2(SetChannelOutputVolumeScaling, *it, scale);
+      return false;
+    }
+    if (-1 == engine()->gips()->volume()->GIPSVE_SetOutputVolumePan(
+        *it, static_cast<float>(left), static_cast<float>(right))) {
+      LOG_GIPSERR3(SetOutputVolumePan, *it, left, right);
+      // Do not return if fails. SetOutputVolumePan is not available for all
+      // pltforms.
+    }
+    LOG(LS_INFO) << "SetOutputScaling to left=" << left * scale
+                 << " right=" << right * scale
+                 << " for channel " << *it << " and ssrc " << ssrc;
+  }
+  return true;
+}
+
+// Retrieves the effective left/right output scaling for a stream (|ssrc|,
+// or the default channel when |ssrc| == 0) by recombining the channel's
+// volume scaling and pan. Returns false for null outputs or unknown ssrc.
+bool GipsVoiceMediaChannel::GetOutputScaling(
+    uint32 ssrc, double* left, double* right) {
+  if (!left || !right) return false;
+
+  talk_base::CritScope lock(&mux_channels_cs_);
+  // Determine which channel based on ssrc.
+  int channel = (0 == ssrc) ? gips_channel() : GetChannel(ssrc);
+  if (channel == -1) {
+    LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
+    return false;
+  }
+
+  float scaling;
+  if (-1 == engine()->gips()->volume()->GIPSVE_GetChannelOutputVolumeScaling(
+      channel, scaling)) {
+    LOG_GIPSERR2(GetChannelOutputVolumeScaling, channel, scaling);
+    return false;
+  }
+
+  float left_pan;
+  float right_pan;
+  if (-1 == engine()->gips()->volume()->GIPSVE_GetOutputVolumePan(
+      channel, left_pan, right_pan)) {
+    LOG_GIPSERR3(GIPSVE_GetOutputVolumePan, channel, left_pan, right_pan);
+    // If GetOutputVolumePan fails, we use the default left and right pan.
+    left_pan = 1.0f;
+    right_pan = 1.0f;
+  }
+
+  // Undo the normalization performed by SetOutputScaling.
+  *left = scaling * left_pan;
+  *right = scaling * right_pan;
+  return true;
+}
+
+// Stores |len| bytes of audio data from |buf| as the ringback tone,
+// replacing any previously set tone.
+bool GipsVoiceMediaChannel::SetRingbackTone(const char *buf, int len) {
+  ringback_tone_.reset(new GipsSoundclipStream(buf, len));
+  return true;
+}
+
+// Starts (|play| == true) or stops local playback of the configured
+// ringback tone on the channel for |ssrc| (or the default channel when
+// |ssrc| == 0). Channels currently playing ringback are tracked in
+// ringback_channels_ so incoming media can cancel the tone.
+bool GipsVoiceMediaChannel::PlayRingbackTone(uint32 ssrc,
+                                             bool play, bool loop) {
+  if (!ringback_tone_.get()) {
+    return false;
+  }
+
+  // Determine which GIPS channel to play on.
+  int channel = (ssrc == 0) ? gips_channel() : GetChannel(ssrc);
+  if (channel == -1) {
+    return false;
+  }
+
+  // Make sure the ringtone is cued properly, and play it out.
+  if (play) {
+    ringback_tone_->set_loop(loop);
+    ringback_tone_->Rewind();
+    if (engine()->gips()->file()->GIPSVE_StartPlayingFileLocally(channel,
+        ringback_tone_.get()) == -1) {
+      LOG_GIPSERR2(StartPlayingFileLocally, channel, ringback_tone_.get());
+      LOG(LS_ERROR) << "Unable to start ringback tone";
+      return false;
+    }
+    ringback_channels_.insert(channel);
+    LOG(LS_INFO) << "Started ringback on channel " << channel;
+  } else {
+    if (engine()->gips()->file()->GIPSVE_StopPlayingFileLocally(channel)
+        == -1) {
+      LOG_GIPSERR1(StopPlayingFileLocally, channel);
+      return false;
+    }
+    LOG(LS_INFO) << "Stopped ringback on channel " << channel;
+    ringback_channels_.erase(channel);
+  }
+
+  return true;
+}
+
+// Sends DTMF |event| out-of-band (RFC 2833 telephone-event), optionally
+// playing the tone locally. Fails if the remote side did not negotiate
+// telephone-event support (dtmf_allowed_).
+bool GipsVoiceMediaChannel::PressDTMF(int event, bool playout) {
+  if (!dtmf_allowed_) {
+    return false;
+  }
+
+  // Enable or disable DTMF playout of this tone as requested. This will linger
+  // until the next call to this method, but that's OK.
+  if (engine()->gips()->dtmf()->GIPSVE_SetDTMFFeedbackStatus(playout) == -1) {
+    // Fixed: this failure was previously logged as SendDTMF with a channel
+    // argument; the failing call is SetDTMFFeedbackStatus(playout).
+    LOG_GIPSERR1(SetDTMFFeedbackStatus, playout);
+    return false;
+  }
+
+  // Send DTMF using out-of-band DTMF. ("true", as 3rd arg)
+  if (engine()->gips()->dtmf()->GIPSVE_SendTelephoneEvent(gips_channel(), event,
+                                                          true) == -1) {
+    LOG_GIPSERR3(SendDTMF, gips_channel(), event, true);
+    return false;
+  }
+
+  return true;
+}
+
+// Demuxes an incoming RTP packet by SSRC to the matching mux channel (or
+// the default channel), cancelling any ringback playing there, and hands
+// the packet to the GIPS decoder.
+void GipsVoiceMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
+  // Pick which channel to send this packet to. If this packet doesn't match
+  // any multiplexed streams, just send it to the default channel. Otherwise,
+  // send it to the specific decoder instance for that stream.
+  int which_channel = GetChannel(
+      ParseSsrc(packet->data(), packet->length(), false));
+  if (which_channel == -1) {
+    which_channel = gips_channel();
+  }
+
+  // Stop any ringback that might be playing on the channel.
+  // It's possible the ringback has already stopped, in which case we'll just
+  // use the opportunity to remove the channel from ringback_channels_.
+  const std::set<int>::iterator it = ringback_channels_.find(which_channel);
+  if (it != ringback_channels_.end()) {
+    if (engine()->gips()->file()->GIPSVE_IsPlayingFileLocally(
+        which_channel) == 1) {
+      engine()->gips()->file()->GIPSVE_StopPlayingFileLocally(which_channel);
+      LOG(LS_INFO) << "Stopped ringback on channel " << which_channel
+                   << " due to incoming media";
+    }
+    ringback_channels_.erase(which_channel);
+  }
+
+  // Pass it off to the decoder.
+  engine()->gips()->network()->GIPSVE_ReceivedRTPPacket(which_channel,
+                                                        packet->data(),
+                                                        packet->length());
+}
+
+// Demuxes an incoming RTCP packet by SSRC to the matching mux channel,
+// falling back to the default channel, and hands it to GIPS.
+void GipsVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
+  int target_channel = GetChannel(
+      ParseSsrc(packet->data(), packet->length(), true));
+  if (target_channel == -1) {
+    target_channel = gips_channel();
+  }
+  engine()->gips()->network()->GIPSVE_ReceivedRTCPPacket(
+      target_channel, packet->data(), packet->length());
+}
+
+// Sets the local SSRC used for outgoing RTP on the primary channel.
+// Failure is logged but not reported to the caller.
+void GipsVoiceMediaChannel::SetSendSsrc(uint32 ssrc) {
+  if (engine()->gips()->rtp()->GIPSVE_SetLocalSSRC(gips_channel(), ssrc)
+      == -1) {
+    LOG_GIPSERR2(SetSendSSRC, gips_channel(), ssrc);
+  }
+}
+
+// Sets the RTCP canonical name (CNAME) for the primary channel.
+bool GipsVoiceMediaChannel::SetRtcpCName(const std::string& cname) {
+  const int result = engine()->gips()->rtp()->GIPSVE_SetRTCP_CNAME(
+      gips_channel(), cname.c_str());
+  if (result == -1) {
+    LOG_GIPSERR2(SetRTCP_CNAME, gips_channel(), cname);
+    return false;
+  }
+  return true;
+}
+
+// Mutes or unmutes microphone input on the primary channel.
+bool GipsVoiceMediaChannel::Mute(bool muted) {
+  const int result = engine()->gips()->volume()->GIPSVE_SetInputMute(
+      gips_channel(), muted);
+  if (result == -1) {
+    LOG_GIPSERR2(SetInputMute, gips_channel(), muted);
+    return false;
+  }
+  return true;
+}
+
+// Collects voice statistics: one sender entry for the primary channel
+// (local RTCP stats plus the remote side's last RTCP report) and one
+// receiver entry per mux channel (or the default channel in a 1:1 call).
+// Returns false only if the sender-side GIPS queries fail.
+bool GipsVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
+  // In VoiceEngine 3.5, GetRTCPStatistics will return 0 even when it fails,
+  // causing the stats to contain garbage information. To prevent this, we
+  // zero the stats structure before calling this API.
+  // TODO(juberti): Remove this workaround.
+  GIPS_CallStatistics cs;
+  unsigned int ssrc;
+  GIPS_CodecInst codec;
+  unsigned int level;
+
+  // Fill in the sender info, based on what we know, and what the
+  // remote side told us it got from its RTCP report.
+  VoiceSenderInfo sinfo;
+
+  // Data we obtain locally.
+  memset(&cs, 0, sizeof(cs));
+  if (engine()->gips()->rtp()->GIPSVE_GetRTCPStatistics(
+          gips_channel(), cs) == -1 ||
+      engine()->gips()->rtp()->GIPSVE_GetLocalSSRC(
+          gips_channel(), ssrc) == -1) {
+    return false;
+  }
+
+  sinfo.ssrc = ssrc;
+  sinfo.bytes_sent = cs.bytesSent;
+  sinfo.packets_sent = cs.packetsSent;
+  // RTT isn't known until a RTCP report is received. Until then, GIPS
+  // returns 0 to indicate an error value.
+  sinfo.rtt_ms = (cs.rttMs > 0) ? cs.rttMs : -1;
+
+  // Data from the last remote RTCP report.
+  unsigned int ntp_high, ntp_low, timestamp, ptimestamp, jitter;
+  unsigned short loss;  // NOLINT
+  if (engine()->gips()->rtp()->GIPSVE_GetRemoteRTCPData(gips_channel(),
+          ntp_high, ntp_low, timestamp, ptimestamp, &jitter, &loss) != -1 &&
+      engine()->gips()->codec()->GIPSVE_GetSendCodec(gips_channel(),
+          codec) != -1) {
+    // Convert Q8 to floating point.
+    sinfo.fraction_lost = static_cast<float>(loss) / (1 << 8);
+    // Convert samples to milliseconds.
+    if (codec.plfreq / 1000 > 0) {
+      sinfo.jitter_ms = jitter / (codec.plfreq / 1000);
+    }
+  } else {
+    // No RTCP report from the remote side yet; mark as unknown.
+    sinfo.fraction_lost = -1;
+    sinfo.jitter_ms = -1;
+  }
+  // TODO(juberti): Figure out how to get remote packets_lost, ext_seqnum
+  sinfo.packets_lost = -1;
+  sinfo.ext_seqnum = -1;
+
+  // Local speech level.
+  sinfo.audio_level = (engine()->gips()->volume()->
+      GIPSVE_GetSpeechInputLevelFullRange(level) != -1) ? level : -1;
+  info->senders.push_back(sinfo);
+
+  // Build the list of receivers, one for each mux channel, or 1 in a 1:1 call.
+  std::vector<int> channels;
+  for (ChannelMap::const_iterator it = mux_channels_.begin();
+       it != mux_channels_.end(); ++it) {
+    channels.push_back(it->second);
+  }
+  if (channels.empty()) {
+    channels.push_back(gips_channel());
+  }
+
+  // Get the SSRC and stats for each receiver, based on our own calculations.
+  // Channels whose GIPS queries fail are silently omitted from the results.
+  for (std::vector<int>::const_iterator it = channels.begin();
+       it != channels.end(); ++it) {
+    memset(&cs, 0, sizeof(cs));
+    if (engine()->gips()->rtp()->GIPSVE_GetRemoteSSRC(*it, ssrc) != -1 &&
+        engine()->gips()->rtp()->GIPSVE_GetRTCPStatistics(*it, cs) != -1 &&
+        engine()->gips()->codec()->GIPSVE_GetRecCodec(*it, codec) != -1) {
+      VoiceReceiverInfo rinfo;
+      rinfo.ssrc = ssrc;
+      rinfo.bytes_rcvd = cs.bytesReceived;
+      rinfo.packets_rcvd = cs.packetsReceived;
+      // The next four fields are from the most recently sent RTCP report.
+      // Convert Q8 to floating point.
+      rinfo.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
+      rinfo.packets_lost = cs.cumulativeLost;
+      rinfo.ext_seqnum = cs.extendedMax;
+      // Convert samples to milliseconds.
+      if (codec.plfreq / 1000 > 0) {
+        rinfo.jitter_ms = cs.jitterSamples / (codec.plfreq / 1000);
+      }
+
+      // Get jitter buffer and total delay (alg + jitter + playout) stats.
+      GIPS_NetworkStatistics ns;
+      if (engine()->gips()->neteq() &&
+          engine()->gips()->neteq()->GIPSVE_GetNetworkStatistics(
+              *it, ns) != -1) {
+        rinfo.jitter_buffer_ms = ns.currentBufferSize;
+        rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
+      }
+      if (engine()->gips()->sync()) {
+        engine()->gips()->sync()->GIPSVE_GetDelayEstimate(*it,
+            rinfo.delay_estimate_ms);
+      }
+
+      // Get speech level.
+      rinfo.audio_level = (engine()->gips()->volume()->
+          GIPSVE_GetSpeechOutputLevelFullRange(*it, level) != -1) ? level : -1;
+      info->receivers.push_back(rinfo);
+    }
+  }
+
+  return true;
+}
+
+// Reports the most recent GIPS engine error, translated to the
+// VoiceMediaChannel error enum, plus the SSRC it is attributed to.
+void GipsVoiceMediaChannel::GetLastMediaError(
+    uint32* ssrc, VoiceMediaChannel::Error* error) {
+  ASSERT(ssrc != NULL);
+  ASSERT(error != NULL);
+  // NOTE(review): the error is always attributed to the primary send
+  // channel here, even if it actually occurred on a mux (receive) channel.
+  FindSsrc(gips_channel(), ssrc);
+  *error = GipsErrorToChannelError(GetLastGipsError());
+}
+
+// Maps a GIPS channel number back to the SSRC we associate with it.
+// Returns true and fills |ssrc| for the send channel, for the special
+// "no channel" (-1) case while sending, or for a known mux channel;
+// returns false for unknown channels.
+bool GipsVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
+  talk_base::CritScope lock(&mux_channels_cs_);
+  ASSERT(ssrc != NULL);
+  if (channel_num == gips_channel()) {
+    unsigned local_ssrc = 0;
+    // This is a sending channel.
+    // NOTE(review): if GIPSVE_GetLocalSSRC fails, |ssrc| is left untouched
+    // yet we still return true -- callers should pre-initialize it.
+    if (engine()->gips()->rtp()->GIPSVE_GetLocalSSRC(
+        channel_num, local_ssrc) != -1) {
+      *ssrc = local_ssrc;
+    }
+    return true;
+  } else if (channel_num == -1 && send_ != SEND_NOTHING) {
+    // Sometimes the GIPS core will throw error with channel_num = -1. This
+    // means the error is not limited to a specific channel. Signal the
+    // message using ssrc=0. If the current channel is sending, use this
+    // channel for sending the message.
+    *ssrc = 0;
+    return true;
+  } else {
+    // Check whether this is a receiving channel.
+    for (ChannelMap::const_iterator it = mux_channels_.begin();
+        it != mux_channels_.end(); ++it) {
+      if (it->second == channel_num) {
+        *ssrc = it->first;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// GIPS error-callback relay: converts the raw GIPS error code and notifies
+// observers through SignalMediaError.
+void GipsVoiceMediaChannel::OnError(uint32 ssrc, int error) {
+  SignalMediaError(ssrc, GipsErrorToChannelError(error));
+}
+
+// Returns the GIPS mux channel receiving the given SSRC, or -1 if no
+// dedicated channel exists for it.
+int GipsVoiceMediaChannel::GetChannel(uint32 ssrc) {
+  ChannelMap::iterator it = mux_channels_.find(ssrc);
+  return (it != mux_channels_.end()) ? it->second : -1;
+}
+
+// Queries the current speech output level of a single GIPS channel.
+// Returns the level on success, -1 on failure.
+int GipsVoiceMediaChannel::GetOutputLevel(int channel) {
+  unsigned int ulevel;
+  int ret =
+      engine()->gips()->volume()->GIPSVE_GetSpeechOutputLevel(channel, ulevel);
+  // Success is checked via == 0 so a failed call never reports the
+  // uninitialized |ulevel|.
+  return (ret == 0) ? static_cast<int>(ulevel) : -1;
+}
+
+// Resolves the inner (payload) codec for a RED send codec. On success,
+// fills |send_codec| with the GIPS codec description for the RED payload
+// type; returns false if the RED parameters are malformed or refer to a
+// codec not present in |all_codecs|.
+bool GipsVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
+    const std::vector<AudioCodec>& all_codecs, GIPS_CodecInst* send_codec) {
+  // Get the RED encodings from the parameter with no name. This may
+  // change based on what is discussed on the Jingle list.
+  // The encoding parameter is of the form "a/b"; we only support where
+  // a == b. Verify this and parse out the value into red_pt.
+  // If the parameter value is absent (as it will be until we wire up the
+  // signaling of this message), use the second codec specified (i.e. the
+  // one after "red") as the encoding parameter.
+  int red_pt = -1;
+  std::string red_params;
+  CodecParameterMap::const_iterator it = red_codec.params.find("");
+  if (it != red_codec.params.end()) {
+    red_params = it->second;
+    std::vector<std::string> red_pts;
+    if (talk_base::split(red_params, '/', &red_pts) != 2 ||
+        red_pts[0] != red_pts[1] ||
+        !talk_base::FromString(red_pts[0], &red_pt)) {
+      LOG(LS_WARNING) << "RED params " << red_params << " not supported.";
+      return false;
+    }
+  } else if (red_codec.params.empty()) {
+    LOG(LS_WARNING) << "RED params not present, using defaults";
+    if (all_codecs.size() > 1) {
+      red_pt = all_codecs[1].id;
+    }
+  }
+  // Note: if params are present but under a different (unexpected) key,
+  // red_pt stays -1 and the lookup below fails, rejecting RED.
+
+  // Try to find red_pt in |codecs|.
+  std::vector<AudioCodec>::const_iterator codec;
+  for (codec = all_codecs.begin(); codec != all_codecs.end(); ++codec) {
+    if (codec->id == red_pt)
+      break;
+  }
+
+  // If we find the right codec, that will be the codec we pass to
+  // GIPSVE_SetSendCodec, with the desired payload type.
+  if (codec != all_codecs.end() &&
+    engine()->FindGIPSCodec(*codec, send_codec)) {
+    send_codec->pltype = red_pt;
+  } else {
+    LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
+    return false;
+  }
+
+  return true;
+}
+
+// Turns on RTCP for the given GIPS channel. Returns false (after logging
+// the failure) if the underlying GIPS call fails.
+bool GipsVoiceMediaChannel::EnableRtcp(int channel) {
+  if (engine()->gips()->rtp()->GIPSVE_SetRTCPStatus(channel, true) == -1) {
+    // Log the channel the call actually failed on -- this function is also
+    // invoked for mux (receive) channels, not just gips_channel().
+    LOG_GIPSERR2(SetRTCPStatus, channel, 1);
+    return false;
+  }
+  // TODO(juberti): Enable VQMon and RTCP XR reports, once we know what
+  // we want to do with them.
+  // engine()->gips().GIPSVE_EnableVQMon(gips_channel(), true);
+  // engine()->gips().GIPSVE_EnableRTCP_XR(gips_channel(), true);
+  return true;
+}
+
+// Starts or stops audio playout on a single GIPS channel.
+// Returns false only if starting playout fails; stopping is best-effort
+// (the StopPlayout return value is deliberately ignored).
+bool GipsVoiceMediaChannel::SetPlayout(int channel, bool playout) {
+  if (playout) {
+    LOG(LS_INFO) << "Starting playout for channel " << channel;
+    if (engine()->gips()->base()->GIPSVE_StartPlayout(channel) == -1) {
+      LOG_GIPSERR1(StartPlayout, channel);
+      return false;
+    }
+  } else {
+    LOG(LS_INFO) << "Stopping playout for channel " << channel;
+    engine()->gips()->base()->GIPSVE_StopPlayout(channel);
+  }
+  return true;
+}
+
+// Extracts the SSRC from a raw packet: RTP carries it at byte offset 8,
+// RTCP carries the sender SSRC at offset 4 (right after the common header).
+// Returns 0 if the packet is too short to contain an SSRC (note this is
+// indistinguishable from a genuine SSRC of 0).
+uint32 GipsVoiceMediaChannel::ParseSsrc(const void* data, size_t len,
+                                        bool rtcp) {
+  size_t ssrc_pos = (!rtcp) ? 8 : 4;
+  uint32 ssrc = 0;
+  if (len >= (ssrc_pos + sizeof(ssrc))) {
+    ssrc = talk_base::GetBE32(static_cast<const char*>(data) + ssrc_pos);
+  }
+  return ssrc;
+}
+
+// Convert GIPS error code into VoiceMediaChannel::Error enum.
+// Unrecognized codes collapse into ERROR_OTHER.
+VoiceMediaChannel::Error GipsVoiceMediaChannel::GipsErrorToChannelError(
+    int err_code) {
+  switch (err_code) {
+    case 0:
+      return ERROR_NONE;
+    // Capture-side (microphone) failures.
+    case VE_CANNOT_START_RECORDING:
+    case VE_MIC_VOL_ERROR:
+    case VE_GET_MIC_VOL_ERROR:
+    case VE_CANNOT_ACCESS_MIC_VOL:
+      return ERROR_REC_DEVICE_OPEN_FAILED;
+    case VE_SATURATION_WARNING:
+      return ERROR_REC_DEVICE_SATURATION;
+    case VE_REC_DEVICE_REMOVED:
+      return ERROR_REC_DEVICE_REMOVED;
+    case VE_RUNTIME_REC_WARNING:
+    case VE_RUNTIME_REC_ERROR:
+      return ERROR_REC_RUNTIME_ERROR;
+    // Render-side (speaker) failures.
+    case VE_CANNOT_START_PLAYOUT:
+    case VE_SPEAKER_VOL_ERROR:
+    case VE_GET_SPEAKER_VOL_ERROR:
+    case VE_CANNOT_ACCESS_SPEAKER_VOL:
+      return ERROR_PLAY_DEVICE_OPEN_FAILED;
+    case VE_RUNTIME_PLAY_WARNING:
+    case VE_RUNTIME_PLAY_ERROR:
+      return ERROR_PLAY_RUNTIME_ERROR;
+    case VE_TYPING_NOISE_WARNING:
+      return ERROR_REC_TYPING_NOISE_DETECTED;
+    default:
+      return VoiceMediaChannel::ERROR_OTHER;
+  }
+}
+
+// InStream implementation: copies up to |len| bytes of the clip into |buf|
+// and returns the number of bytes actually read (0 at end-of-stream or on
+// error; the MemoryStream result code is deliberately ignored).
+int GipsSoundclipStream::Read(void *buf, int len) {
+  size_t res = 0;
+  mem_.Read(buf, len, &res, NULL);
+  return res;
+}
+
+// InStream implementation: invoked when GIPS reaches the end of the clip.
+// Returning 0 restarts playback from the top; -1 stops it.
+int GipsSoundclipStream::Rewind() {
+  mem_.Rewind();
+  // Return -1 to keep GIPS from looping.
+  return (loop_) ? 0 : -1;
+}
+
+} // namespace cricket
+
+#endif // HAVE_GIPS
+
+
diff --git a/talk/session/phone/gipsmediaengine.h b/talk/session/phone/gipsmediaengine.h
new file mode 100644
index 0000000..86cc3f5
--- /dev/null
+++ b/talk/session/phone/gipsmediaengine.h
@@ -0,0 +1,328 @@
+/*
+ * libjingle
+ * Copyright 2004--2005, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_PHONE_GIPSMEDIAENGINE_H_
+#define TALK_SESSION_PHONE_GIPSMEDIAENGINE_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "talk/base/buffer.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/logging.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stream.h"
+#include "talk/session/phone/channel.h"
+#include "talk/session/phone/mediaprocessorinterface.h"
+#include "talk/session/phone/mediaengine.h"
+#include "talk/session/phone/gips.h"
+#include "talk/session/phone/rtputils.h"
+
+namespace cricket {
+
+// GipsSoundclipStream is an adapter object that allows a memory stream to be
+// passed into GIPS, and support looping.
+class GipsSoundclipStream : public InStream {
+ public:
+  // Wraps (but does not copy ownership semantics beyond MemoryStream's
+  // behavior of) |buf| of |len| bytes; looping is enabled by default.
+  GipsSoundclipStream(const char* buf, size_t len)
+      : mem_(buf, len), loop_(true) {
+  }
+  void set_loop(bool loop) { loop_ = loop; }
+  virtual int Read(void* buf, int len);
+  // Called by GIPS at end-of-clip; return value controls looping.
+  virtual int Rewind();
+
+ private:
+  talk_base::MemoryStream mem_;  // backing audio data
+  bool loop_;                    // whether Rewind() restarts playback
+};
+
+// GipsMonitorStream is used to monitor a stream coming from GIPS.
+// For now we just dump the data.
+class GipsMonitorStream : public OutStream {
+  // Note: private in this class, but still reachable via virtual dispatch
+  // through the OutStream base interface.
+  virtual bool Write(const void *buf, int len) {
+    return true;
+  }
+};
+
+class GipsSoundclipMedia;
+class GipsVoiceMediaChannel;
+
+// GipsVoiceEngine is a class to be used with CompositeMediaEngine.
+// It uses the GIPS VoiceEngine library for audio handling.
+class GipsVoiceEngine
+    : public GIPSVoiceEngineObserver,
+      public GIPSTraceCallback {
+ public:
+  GipsVoiceEngine();  // NOLINT
+  // Dependency injection for testing.
+  GipsVoiceEngine(GipsWrapper* gips,
+                  GipsWrapper* gips_sc,
+                  GipsTraceWrapper* tracing);
+  ~GipsVoiceEngine();
+  bool Init();
+  void Terminate();
+
+  int GetCapabilities();
+  VoiceMediaChannel* CreateChannel();
+
+  SoundclipMedia *CreateSoundclip();
+
+  bool SetOptions(int options);
+  bool SetDevices(const Device* in_device, const Device* out_device);
+  bool GetOutputVolume(int* level);
+  bool SetOutputVolume(int level);
+  int GetInputLevel();
+  bool SetLocalMonitor(bool enable);
+
+  const std::vector<AudioCodec>& codecs();
+  bool FindCodec(const AudioCodec& codec);
+  // Looks up the GIPS codec description matching |codec|; fills |gcodec|.
+  bool FindGIPSCodec(const AudioCodec& codec, GIPS_CodecInst* gcodec);
+
+  void SetLogging(int min_sev, const char* filter);
+
+  // For tracking GIPS channels. Needed because we have to pause them
+  // all when switching devices.
+  // May only be called by GipsVoiceMediaChannel.
+  void RegisterChannel(GipsVoiceMediaChannel *channel);
+  void UnregisterChannel(GipsVoiceMediaChannel *channel);
+  // May only be called by GipsSoundclipMedia.
+  void RegisterSoundclip(GipsSoundclipMedia *channel);
+  void UnregisterSoundclip(GipsSoundclipMedia *channel);
+
+  // Called by GipsVoiceMediaChannel to set a gain offset from
+  // the default AGC target level.
+  bool AdjustAgcLevel(int delta);
+
+  // Called by GipsVoiceMediaChannel to configure echo cancellation
+  // and noise suppression modes.
+  bool SetConferenceMode(bool enable);
+
+  // Primary VoiceEngine instance (calls).
+  GipsWrapper* gips() { return gips_.get(); }
+  // Secondary instance, used only for soundclip playout.
+  GipsWrapper* gips_sc() { return gips_sc_.get(); }
+  int GetLastGipsError();
+
+  virtual bool RegisterVoiceProcessor(VoiceProcessor* vp_interface,
+                                      MediaProcessorDirection direction);
+  virtual bool UnregisterVoiceProcessor(VoiceProcessor* vp_interface,
+                                        MediaProcessorDirection direction);
+
+ private:
+  typedef std::vector<GipsSoundclipMedia *> SoundclipList;
+  typedef std::vector<GipsVoiceMediaChannel *> ChannelList;
+
+  // Name/clockrate pair used to express codec preference ordering.
+  struct CodecPref {
+    const char* name;
+    int clockrate;
+  };
+
+  void Construct();
+  bool InitInternal();
+  void ApplyLogging(const std::string& log_filter);
+  // GIPSTraceCallback implementation (receives trace output from GIPS).
+  virtual void Print(const GIPS::TraceLevel level,
+                     const char* traceString, const int length);
+  // GIPSVoiceEngineObserver implementation (receives engine errors).
+  virtual void CallbackOnError(const int channel, const int errCode);
+  static int GetCodecPreference(const char *name, int clockrate);
+  // Given the device type, name, and id, find GIPS's device id. Return true and
+  // set the output parameter gips_id if successful.
+  bool FindGipsAudioDeviceId(
+      bool is_input, const std::string& dev_name, int dev_id, int* gips_id);
+  bool FindChannelAndSsrc(int channel_num,
+                          GipsVoiceMediaChannel** channel,
+                          uint32* ssrc) const;
+  bool ChangeLocalMonitor(bool enable);
+  bool PauseLocalMonitor();
+  bool ResumeLocalMonitor();
+
+  static const int kDefaultLogSeverity = talk_base::LS_WARNING;
+  static const CodecPref kCodecPrefs[];
+
+  // The primary instance of GIPS VoiceEngine.
+  talk_base::scoped_ptr<GipsWrapper> gips_;
+  // A secondary instance, for playing out soundclips (on the 'ring' device).
+  talk_base::scoped_ptr<GipsWrapper> gips_sc_;
+  talk_base::scoped_ptr<GipsTraceWrapper> tracing_;
+  int log_level_;
+  std::string log_filter_;
+  bool is_dumping_aec_;
+  std::vector<AudioCodec> codecs_;
+  bool desired_local_monitor_enable_;
+  talk_base::scoped_ptr<GipsMonitorStream> monitor_;
+  SoundclipList soundclips_;
+  ChannelList channels_;
+  // channels_ can be read from GIPS callback thread. We need a lock on that
+  // callback as well as the RegisterChannel/UnregisterChannel.
+  talk_base::CriticalSection channels_cs_;
+  GIPS_AGC_config default_agc_config_;
+  bool initialized_;
+
+  // Media-processor hooks; guarded by signal_media_critical_.
+  sigslot::signal2<uint32, AudioFrame*> SignalTxMediaFrame;
+  sigslot::signal2<uint32, bool> SignalTxMute;
+  sigslot::signal2<uint32, AudioFrame*> SignalRxMediaFrame;
+  sigslot::signal2<uint32, bool> SignalRxMute;
+  talk_base::CriticalSection signal_media_critical_;
+};
+
+// GipsMediaChannel is a class that implements the common GIPS channel
+// functionality.
+// T is the MediaChannel interface type (e.g. VoiceMediaChannel), E the
+// engine type (e.g. GipsVoiceEngine).
+template <class T, class E>
+class GipsMediaChannel : public T, public GIPS_transport {
+ public:
+  GipsMediaChannel(E *engine, int channel)
+      : engine_(engine), gips_channel_(channel), sequence_number_(-1) {}
+  E *engine() { return engine_; }
+  int gips_channel() const { return gips_channel_; }
+  bool valid() const { return gips_channel_ != -1; }
+
+ protected:
+  // implements GIPS_transport interface
+  // Returns |len| on success, -1 on failure (no network interface, a
+  // malformed packet, or a send error).
+  virtual int SendPacket(int channel, const void *data, int len) {
+    if (!T::network_interface_) {
+      return -1;
+    }
+
+    // We need to store the sequence number to be able to pick up
+    // the same sequence when the device is restarted.
+    // TODO(oja): Remove when GIPS has fixed the problem.
+    int seq_num;
+    if (!GetRtpSeqNum(data, len, &seq_num)) {
+      return -1;
+    }
+    if (sequence_number() == -1) {
+      LOG(LS_INFO) << "GipsVoiceMediaChannel sends first packet seqnum="
+                   << seq_num;
+    }
+    sequence_number_ = seq_num;
+
+    talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+    return T::network_interface_->SendPacket(&packet) ? len : -1;
+  }
+  virtual int SendRTCPPacket(int channel, const void *data, int len) {
+    if (!T::network_interface_) {
+      return -1;
+    }
+
+    talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+    return T::network_interface_->SendRtcp(&packet) ? len : -1;
+  }
+  // Last RTP sequence number sent, or -1 before the first packet.
+  int sequence_number() const {
+    return sequence_number_;
+  }
+
+ private:
+  E *engine_;           // not owned
+  int gips_channel_;    // -1 means invalid/unallocated
+  int sequence_number_;
+};
+
+// GipsVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
+// GIPS Voice Engine.
+class GipsVoiceMediaChannel
+    : public GipsMediaChannel<VoiceMediaChannel, GipsVoiceEngine> {
+ public:
+  explicit GipsVoiceMediaChannel(GipsVoiceEngine *engine);
+  virtual ~GipsVoiceMediaChannel();
+  virtual bool SetOptions(int options);
+  virtual bool SetRecvCodecs(const std::vector<AudioCodec> &codecs);
+  virtual bool SetSendCodecs(const std::vector<AudioCodec> &codecs);
+  virtual bool SetRecvRtpHeaderExtensions(
+      const std::vector<RtpHeaderExtension> &extensions);
+  virtual bool SetSendRtpHeaderExtensions(
+      const std::vector<RtpHeaderExtension> &extensions);
+  virtual bool SetPlayout(bool playout);
+  // Pause/Resume are used by the engine during device switches; they
+  // preserve the desired state so it can be restored on resume.
+  bool PausePlayout();
+  bool ResumePlayout();
+  virtual bool SetSend(SendFlags send);
+  bool PauseSend();
+  bool ResumeSend();
+
+  virtual bool AddStream(uint32 ssrc);
+  virtual bool RemoveStream(uint32 ssrc);
+  virtual bool GetActiveStreams(AudioInfo::StreamList* actives);
+  virtual int GetOutputLevel();
+  virtual bool SetOutputScaling(uint32 ssrc, double left, double right);
+  virtual bool GetOutputScaling(uint32 ssrc, double* left, double* right);
+
+  virtual bool SetRingbackTone(const char *buf, int len);
+  virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop);
+  virtual bool PressDTMF(int event, bool playout);
+
+  virtual void OnPacketReceived(talk_base::Buffer* packet);
+  virtual void OnRtcpReceived(talk_base::Buffer* packet);
+  virtual void SetSendSsrc(uint32 id);
+  virtual bool SetRtcpCName(const std::string& cname);
+  virtual bool Mute(bool mute);
+  // Bandwidth control is not supported by this engine.
+  virtual bool SetSendBandwidth(bool autobw, int bps) { return false; }
+  virtual bool GetStats(VoiceMediaInfo* info);
+  // Gets last reported error from GIPS voice engine. This should be only
+  // called in response a failure.
+  virtual void GetLastMediaError(uint32* ssrc,
+                                 VoiceMediaChannel::Error* error);
+  bool FindSsrc(int gips_channel, uint32* ssrc);
+  void OnError(uint32 ssrc, int error);
+
+ protected:
+  int GetLastGipsError() { return engine()->GetLastGipsError(); }
+  int GetChannel(uint32 ssrc);
+  int GetOutputLevel(int channel);
+  bool GetRedSendCodec(const AudioCodec& red_codec,
+                       const std::vector<AudioCodec>& all_codecs,
+                       GIPS_CodecInst* send_codec);
+  bool EnableRtcp(int channel);
+  bool SetPlayout(int channel, bool playout);
+  static uint32 ParseSsrc(const void* data, size_t len, bool rtcp);
+  static Error GipsErrorToChannelError(int err_code);
+
+ private:
+  // Tandberg-bridged conferences require a -10dB gain adjustment,
+  // which is actually +10 in GIPS_AGC_config.targetLeveldBOv
+  static const int kTandbergDbAdjustment = 10;
+
+  bool ChangePlayout(bool playout);
+  bool ChangeSend(SendFlags send);
+
+  typedef std::map<uint32, int> ChannelMap;  // SSRC -> GIPS channel
+  talk_base::scoped_ptr<GipsSoundclipStream> ringback_tone_;
+  std::set<int> ringback_channels_;  // channels playing ringback
+  int channel_options_;
+  bool agc_adjusted_;
+  bool dtmf_allowed_;
+  // desired_* record the caller's intent so Pause*/Resume* can restore it.
+  bool desired_playout_;
+  bool playout_;
+  SendFlags desired_send_;
+  SendFlags send_;
+  ChannelMap mux_channels_;  // for multiple sources
+  // mux_channels_ can be read from GIPS callback thread.  Accesses off the
+  // GIPS thread must be synchronized with edits on the worker thread.  Reads
+  // on the worker thread are ok.
+  mutable talk_base::CriticalSection mux_channels_cs_;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_PHONE_GIPSMEDIAENGINE_H_
diff --git a/talk/session/phone/gipsmediaengine_unittest.cc b/talk/session/phone/gipsmediaengine_unittest.cc
new file mode 100644
index 0000000..9824bba
--- /dev/null
+++ b/talk/session/phone/gipsmediaengine_unittest.cc
@@ -0,0 +1,1260 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Author: Justin Uberti (juberti@google.com)
+
+#include "talk/base/byteorder.h"
+#include "talk/base/gunit.h"
+#include "talk/session/phone/channel.h"
+#include "talk/session/phone/fakegipsvoiceengine.h"
+#include "talk/session/phone/fakemediaengine.h"
+#include "talk/session/phone/fakemediaprocessor.h"
+#include "talk/session/phone/fakertp.h"
+#include "talk/session/phone/fakesession.h"
+#include "talk/session/phone/gipsmediaengine.h"
+
+// Tests for the GipsVoiceEngine/VoiceChannel code.
+
+// Shared codec fixtures. AudioCodec constructor arguments are presumably
+// (id, name, clockrate, bitrate, channels, preference) -- TODO confirm
+// against the AudioCodec declaration.
+static const cricket::AudioCodec kPcmuCodec(0, "PCMU", 8000, 64000, 1, 0);
+static const cricket::AudioCodec kIsacCodec(103, "ISAC", 16000, -1, 1, 0);
+static const cricket::AudioCodec kRedCodec(117, "red", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCn8000Codec(13, "CN", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCn16000Codec(105, "CN", 16000, 0, 1, 0);
+static const cricket::AudioCodec
+    kTelephoneEventCodec(106, "telephone-event", 8000, 0, 1, 0);
+static const cricket::AudioCodec* const kAudioCodecs[] = {
+  &kPcmuCodec, &kIsacCodec, &kRedCodec, &kCn8000Codec, &kCn16000Codec,
+  &kTelephoneEventCodec,
+};
+// Placeholder RIFF/WAVE-shaped bytes used as ringback tone data.
+const char kRingbackTone[] = "RIFF____WAVE____ABCD1234";
+
+// Routes every GIPS sub-API of the wrapper to a single FakeGIPSVoiceEngine,
+// except neteq, which is left NULL to exercise the engine's null-check
+// paths for that interface.
+class FakeGipsWrapper : public GipsWrapper {
+ public:
+  explicit FakeGipsWrapper(cricket::FakeGIPSVoiceEngine* engine)
+      : GipsWrapper(engine,  // base
+                    engine,  // codec
+                    engine,  // dtmf
+                    engine,  // file
+                    engine,  // hw
+                    engine,  // media
+                    NULL,    // neteq
+                    engine,  // network
+                    engine,  // rtp
+                    engine,  // sync
+                    engine,  // volume
+                    engine) {  // vqe
+  }
+};
+
+// A tracing wrapper that silently accepts (and discards) all trace
+// configuration, so tests produce no trace output.
+class NullGipsTraceWrapper : public GipsTraceWrapper {
+ public:
+  virtual int SetTraceFilter(const unsigned int filter) {
+    return 0;
+  }
+  virtual int SetTraceFile(const char* fileNameUTF8) {
+    return 0;
+  }
+  virtual int SetEncryptedTraceFile(const char* fileNameUTF8) {
+    return 0;
+  }
+  virtual int SetTraceCallback(GIPSTraceCallback* callback) {
+    return 0;
+  }
+};
+
+// Test fixture: wires two fake GIPS engines (primary + soundclip) into a
+// real GipsVoiceEngine, with tracing disabled.
+class GipsVoiceEngineTest : public testing::Test {
+ public:
+  // Records the most recent (ssrc, error) pair signalled on a channel's
+  // SignalMediaError, for later inspection by the test.
+  class ChannelErrorListener : public sigslot::has_slots<> {
+   public:
+    explicit ChannelErrorListener(cricket::VoiceMediaChannel* channel)
+        : ssrc_(0), error_(cricket::VoiceMediaChannel::ERROR_NONE) {
+      ASSERT(channel != NULL);
+      channel->SignalMediaError.connect(
+          this, &ChannelErrorListener::OnVoiceChannelError);
+    }
+    void OnVoiceChannelError(uint32 ssrc,
+                             cricket::VoiceMediaChannel::Error error) {
+      ssrc_ = ssrc;
+      error_ = error;
+    }
+    void Reset() {
+      ssrc_ = 0;
+      error_ = cricket::VoiceMediaChannel::ERROR_NONE;
+    }
+    uint32 ssrc() const {
+      return ssrc_;
+    }
+    cricket::VoiceMediaChannel::Error error() const {
+      return error_;
+    }
+
+   private:
+    uint32 ssrc_;
+    cricket::VoiceMediaChannel::Error error_;
+  };
+  // TODO(juberti): Implement other stub interfaces (VQE)
+  GipsVoiceEngineTest()
+      : gips_(kAudioCodecs, ARRAY_SIZE(kAudioCodecs)),
+        gips_sc_(kAudioCodecs, ARRAY_SIZE(kAudioCodecs)),
+        engine_(new FakeGipsWrapper(&gips_), new FakeGipsWrapper(&gips_sc_),
+                new NullGipsTraceWrapper()),
+        channel_(NULL), soundclip_(NULL) {
+  }
+  // Initializes the engine and creates channel_; returns false on failure.
+  bool SetupEngine() {
+    bool result = engine_.Init();
+    if (result) {
+      channel_ = engine_.CreateChannel();
+      result = (channel_ != NULL);
+    }
+    return result;
+  }
+  // Feeds a raw packet into channel_ as if it arrived from the network.
+  void DeliverPacket(const void* data, int len) {
+    talk_base::Buffer packet(data, len);
+    channel_->OnPacketReceived(&packet);
+  }
+  virtual void TearDown() {
+    // Channels must be destroyed before the engine is terminated.
+    delete soundclip_;
+    delete channel_;
+    engine_.Terminate();
+  }
+
+ protected:
+  cricket::FakeGIPSVoiceEngine gips_;     // fake backing the call engine
+  cricket::FakeGIPSVoiceEngine gips_sc_;  // fake backing the soundclip engine
+  cricket::GipsVoiceEngine engine_;
+  cricket::VoiceMediaChannel* channel_;
+  cricket::SoundclipMedia* soundclip_;
+};
+
+// Tests that our stub library "works".
+TEST_F(GipsVoiceEngineTest, StartupShutdown) {
+  EXPECT_FALSE(gips_.IsInited());
+  EXPECT_FALSE(gips_sc_.IsInited());
+  EXPECT_TRUE(engine_.Init());
+  // Init/Terminate must propagate to both the call and soundclip engines.
+  EXPECT_TRUE(gips_.IsInited());
+  EXPECT_TRUE(gips_sc_.IsInited());
+  engine_.Terminate();
+  EXPECT_FALSE(gips_.IsInited());
+  EXPECT_FALSE(gips_sc_.IsInited());
+}
+
+// Tests that we can create and destroy a channel.
+TEST_F(GipsVoiceEngineTest, CreateChannel) {
+  EXPECT_TRUE(engine_.Init());
+  channel_ = engine_.CreateChannel();
+  EXPECT_TRUE(channel_ != NULL);
+  // channel_ is freed by the fixture's TearDown.
+}
+
+// Tests that we properly handle failures in CreateChannel.
+TEST_F(GipsVoiceEngineTest, CreateChannelFail) {
+  // Force the fake to fail channel creation; CreateChannel must return NULL
+  // rather than a half-constructed channel.
+  gips_.set_fail_create_channel(true);
+  EXPECT_TRUE(engine_.Init());
+  channel_ = engine_.CreateChannel();
+  EXPECT_TRUE(channel_ == NULL);
+}
+
+// Tests that we can find codecs by name or id, and that we interpret the
+// clockrate and bitrate fields properly.
+TEST_F(GipsVoiceEngineTest, FindCodec) {
+  cricket::AudioCodec codec;
+  GIPS_CodecInst codec_inst;
+  // Find PCMU with explicit clockrate and bitrate.
+  EXPECT_TRUE(engine_.FindGIPSCodec(kPcmuCodec, &codec_inst));
+  // Find ISAC with explicit clockrate and 0 bitrate.
+  EXPECT_TRUE(engine_.FindGIPSCodec(kIsacCodec, &codec_inst));
+  // Find telephone-event with explicit clockrate and 0 bitrate.
+  EXPECT_TRUE(engine_.FindGIPSCodec(kTelephoneEventCodec, &codec_inst));
+  // Find ISAC with a different payload id.
+  codec = kIsacCodec;
+  codec.id = 127;
+  EXPECT_TRUE(engine_.FindGIPSCodec(codec, &codec_inst));
+  // Find PCMU with a 0 clockrate; matching must fill in the real rate.
+  codec = kPcmuCodec;
+  codec.clockrate = 0;
+  EXPECT_TRUE(engine_.FindGIPSCodec(codec, &codec_inst));
+  EXPECT_EQ(8000, codec_inst.plfreq);
+  // Find PCMU with a 0 bitrate; matching must fill in the real bitrate.
+  codec = kPcmuCodec;
+  codec.bitrate = 0;
+  EXPECT_TRUE(engine_.FindGIPSCodec(codec, &codec_inst));
+  EXPECT_EQ(64000, codec_inst.rate);
+  // Find ISAC with an explicit bitrate; the request must be honored.
+  codec = kIsacCodec;
+  codec.bitrate = 32000;
+  EXPECT_TRUE(engine_.FindGIPSCodec(codec, &codec_inst));
+  EXPECT_EQ(32000, codec_inst.rate);
+}
+
+// Test that we set our inbound codecs properly, including changing PT.
+TEST_F(GipsVoiceEngineTest, SetRecvCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  // Remap ISAC from its default 103 to 96; the engine must register the
+  // new payload type with GIPS.
+  codecs[0].id = 96;
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  talk_base::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "ISAC");
+  gcodec.plfreq = 16000;
+  EXPECT_EQ(0, gips_.GIPSVE_GetRecPayloadType(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+}
+
+// Test that we fail to set an unknown inbound codec.
+TEST_F(GipsVoiceEngineTest, SetRecvCodecsUnsupportedCodec) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  // "XYZ" is not a codec the fake engine knows; the whole call must fail.
+  codecs.push_back(cricket::AudioCodec(127, "XYZ", 32000, 0, 1, 0));
+  EXPECT_FALSE(channel_->SetRecvCodecs(codecs));
+}
+
+// Test that we apply codecs properly.
+TEST_F(GipsVoiceEngineTest, SetSendCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kRedCodec);
+  codecs[0].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  // First codec in the list (ISAC, remapped to 96) becomes the send codec.
+  EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(gips_.GetVAD(channel_num));
+  EXPECT_FALSE(gips_.GetFEC(channel_num));
+  // 13/105/106 are the fixture defaults for narrowband CN, wideband CN,
+  // and telephone-event respectively.
+  EXPECT_EQ(13, gips_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(105, gips_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(106, gips_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we fall back to PCMU if no codecs are specified.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsNoCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  // Empty codec list: the engine must fall back to PCMU (PT 0).
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(0, gcodec.pltype);
+  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(gips_.GetVAD(channel_num));
+  EXPECT_FALSE(gips_.GetFEC(channel_num));
+  EXPECT_EQ(13, gips_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(105, gips_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(106, gips_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we set VAD and DTMF types correctly.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsCNandDTMF) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  // TODO(juberti): cn 32000
+  codecs.push_back(kCn16000Codec);
+  codecs.push_back(kCn8000Codec);
+  codecs.push_back(kTelephoneEventCodec);
+  codecs.push_back(kRedCodec);
+  codecs[0].id = 96;
+  codecs[2].id = 97;  // wideband CN
+  codecs[4].id = 98;  // DTMF
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  // Offering CN enables VAD; the remapped CN (97) and DTMF (98) payload
+  // types must be picked up, while narrowband CN keeps its default 13.
+  EXPECT_TRUE(gips_.GetVAD(channel_num));
+  EXPECT_FALSE(gips_.GetFEC(channel_num));
+  EXPECT_EQ(13, gips_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(97, gips_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(98, gips_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we perform case-insensitive matching of codec names.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsCaseInsensitive) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kCn16000Codec);
+  codecs.push_back(kCn8000Codec);
+  codecs.push_back(kTelephoneEventCodec);
+  codecs.push_back(kRedCodec);
+  // Mixed-case name must still match the canonical "ISAC".
+  codecs[0].name = "iSaC";
+  codecs[0].id = 96;
+  codecs[2].id = 97;  // wideband CN
+  codecs[4].id = 98;  // DTMF
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(gips_.GetVAD(channel_num));
+  EXPECT_FALSE(gips_.GetFEC(channel_num));
+  EXPECT_EQ(13, gips_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(97, gips_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(98, gips_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we set up FEC correctly.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsRED) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  // "96/96": both RED encodings reference the inner codec (ISAC at 96).
+  codecs[0].params[""] = "96/96";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  // Inner codec is sent; FEC is enabled with RED's own PT (127).
+  EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(gips_.GetFEC(channel_num));
+  EXPECT_EQ(127, gips_.GetSendFECPayloadType(channel_num));
+}
+
+// Test that we set up FEC correctly if params are omitted.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsREDNoParams) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[1].id = 96;
+  // No RED params at all: the codec after "red" (ISAC) is used by default.
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(gips_.GetFEC(channel_num));
+  EXPECT_EQ(127, gips_.GetSendFECPayloadType(channel_num));
+}
+
+// Test that we ignore RED if the parameters aren't named the way we expect.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsBadRED1) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  // Params under an unexpected key ("ABC" instead of ""): RED is skipped,
+  // the inner codec is still applied, and FEC stays off.
+  codecs[0].params["ABC"] = "96/96";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(gips_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it uses different primary/secondary encoding.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsBadRED2) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = gips_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  // Mismatched primary/secondary ("96/0"): RED is rejected, FEC stays off.
+  codecs[0].params[""] = "96/0";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  GIPS_CodecInst gcodec;
+  EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(gips_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it uses more than 2 encodings.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsBadRED3) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kRedCodec);
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kPcmuCodec);
+ codecs[0].id = 127;
+ // Three redundant encodings ("96/96/96") exceed the supported depth of 2.
+ codecs[0].params[""] = "96/96/96";
+ codecs[1].id = 96;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ GIPS_CodecInst gcodec;
+ EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(96, gcodec.pltype);
+ EXPECT_STREQ("ISAC", gcodec.plname);
+ EXPECT_FALSE(gips_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it has bogus codec ids.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsBadRED4) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kRedCodec);
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kPcmuCodec);
+ codecs[0].id = 127;
+ // Non-numeric payload ids in the RED encoding list should not parse.
+ codecs[0].params[""] = "ABC/ABC";
+ codecs[1].id = 96;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ GIPS_CodecInst gcodec;
+ EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(96, gcodec.pltype);
+ EXPECT_STREQ("ISAC", gcodec.plname);
+ EXPECT_FALSE(gips_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it refers to a codec that is not present.
+TEST_F(GipsVoiceEngineTest, SetSendCodecsBadRED5) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kRedCodec);
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kPcmuCodec);
+ codecs[0].id = 127;
+ // Payload 97 is not offered by any codec in the list, so RED is rejected.
+ codecs[0].params[""] = "97/97";
+ codecs[1].id = 96;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ GIPS_CodecInst gcodec;
+ EXPECT_EQ(0, gips_.GIPSVE_GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(96, gcodec.pltype);
+ EXPECT_STREQ("ISAC", gcodec.plname);
+ EXPECT_FALSE(gips_.GetFEC(channel_num));
+}
+
+// Test that we support setting an empty list of recv header extensions.
+// Note: the audio-level extension is only relevant on the send side, so
+// receive-side configuration must never enable it in the engine.
+TEST_F(GipsVoiceEngineTest, SetRecvRtpHeaderExtensions) {
+ EXPECT_TRUE(SetupEngine());
+ std::vector<cricket::RtpHeaderExtension> extensions;
+ int channel_num = gips_.GetLastChannel();
+ bool enable = false;
+ unsigned char id = 0;
+
+ // An empty list shouldn't cause audio-level headers to be enabled.
+ EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
+ EXPECT_EQ(0, gips_.GIPSVE_GetRTPAudioLevelIndicationStatus(
+ channel_num, enable, id));
+ EXPECT_FALSE(enable);
+
+ // Nor should indicating we can receive the audio-level header.
+ extensions.push_back(cricket::RtpHeaderExtension(
+ "urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
+ EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
+ EXPECT_EQ(0, gips_.GIPSVE_GetRTPAudioLevelIndicationStatus(
+ channel_num, enable, id));
+ EXPECT_FALSE(enable);
+}
+
+// Test that we support setting certain send header extensions.
+// Covers the full lifecycle: off by default, stays off for an empty list,
+// turns on (with the negotiated id) when the extension is offered, and
+// turns back off when the list is cleared.
+TEST_F(GipsVoiceEngineTest, SetSendRtpHeaderExtensions) {
+ EXPECT_TRUE(SetupEngine());
+ std::vector<cricket::RtpHeaderExtension> extensions;
+ int channel_num = gips_.GetLastChannel();
+ bool enable = false;
+ unsigned char id = 0;
+
+ // Ensure audio levels are off by default.
+ EXPECT_EQ(0, gips_.GIPSVE_GetRTPAudioLevelIndicationStatus(
+ channel_num, enable, id));
+ EXPECT_FALSE(enable);
+
+ // Ensure audio levels stay off with an empty list of headers.
+ EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+ EXPECT_EQ(0, gips_.GIPSVE_GetRTPAudioLevelIndicationStatus(
+ channel_num, enable, id));
+ EXPECT_FALSE(enable);
+
+ // Ensure audio levels are enabled if the audio-level header is specified.
+ extensions.push_back(cricket::RtpHeaderExtension(
+ "urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
+ EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+ EXPECT_EQ(0, gips_.GIPSVE_GetRTPAudioLevelIndicationStatus(
+ channel_num, enable, id));
+ EXPECT_TRUE(enable);
+ EXPECT_EQ(8, id);
+
+ // Ensure audio levels go back off with an empty list.
+ extensions.clear();
+ EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+ EXPECT_EQ(0, gips_.GIPSVE_GetRTPAudioLevelIndicationStatus(
+ channel_num, enable, id));
+ EXPECT_FALSE(enable);
+}
+
+// Test that we can create a channel and start sending/playing out on it.
+// Each channel_ toggle is cross-checked against the fake engine's state.
+TEST_F(GipsVoiceEngineTest, SendAndPlayout) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(gips_.GetSend(channel_num));
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ EXPECT_FALSE(gips_.GetSend(channel_num));
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num));
+}
+
+// Test that we can set the devices to use.
+TEST_F(GipsVoiceEngineTest, SetDevices) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ cricket::Device default_dev(cricket::kFakeDefaultDeviceName,
+ cricket::kFakeDefaultDeviceId);
+ cricket::Device dev(cricket::kFakeDeviceName,
+ cricket::kFakeDeviceId);
+
+ // Test SetDevices() while not sending or playing.
+ EXPECT_TRUE(engine_.SetDevices(&default_dev, &default_dev));
+
+ // Test SetDevices() while sending and playing.
+ EXPECT_TRUE(engine_.SetLocalMonitor(true));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(gips_.GetRecordingMicrophone());
+ EXPECT_TRUE(gips_.GetSend(channel_num));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num));
+
+ // A device switch mid-call must leave recording/send/playout running.
+ EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+ EXPECT_TRUE(gips_.GetRecordingMicrophone());
+ EXPECT_TRUE(gips_.GetSend(channel_num));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num));
+
+ // Test that failure to open newly selected devices does not prevent opening
+ // ones after that.
+ gips_.set_fail_start_recording_microphone(true);
+ gips_.set_playout_fail_channel(channel_num);
+ gips_.set_send_fail_channel(channel_num);
+
+ EXPECT_FALSE(engine_.SetDevices(&default_dev, &default_dev));
+
+ EXPECT_FALSE(gips_.GetRecordingMicrophone());
+ EXPECT_FALSE(gips_.GetSend(channel_num));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num));
+
+ // Clear the injected failures (-1 = no channel fails).
+ gips_.set_fail_start_recording_microphone(false);
+ gips_.set_playout_fail_channel(-1);
+ gips_.set_send_fail_channel(-1);
+
+ EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+ EXPECT_TRUE(gips_.GetRecordingMicrophone());
+ EXPECT_TRUE(gips_.GetSend(channel_num));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num));
+}
+
+// Test that we can set the devices to use even if we failed to open the initial
+// ones.
+TEST_F(GipsVoiceEngineTest, SetDevicesWithInitiallyBadDevices) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ cricket::Device default_dev(cricket::kFakeDefaultDeviceName,
+ cricket::kFakeDefaultDeviceId);
+ cricket::Device dev(cricket::kFakeDeviceName,
+ cricket::kFakeDeviceId);
+
+ // Test that failure to open devices selected before starting send/play does
+ // not prevent opening newly selected ones after that.
+ gips_.set_fail_start_recording_microphone(true);
+ gips_.set_playout_fail_channel(channel_num);
+ gips_.set_send_fail_channel(channel_num);
+
+ // Selecting the device succeeds; it is the subsequent open that fails.
+ EXPECT_TRUE(engine_.SetDevices(&default_dev, &default_dev));
+
+ EXPECT_FALSE(engine_.SetLocalMonitor(true));
+ EXPECT_FALSE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_FALSE(channel_->SetPlayout(true));
+ EXPECT_FALSE(gips_.GetRecordingMicrophone());
+ EXPECT_FALSE(gips_.GetSend(channel_num));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num));
+
+ // Clear the injected failures (-1 = no channel fails).
+ gips_.set_fail_start_recording_microphone(false);
+ gips_.set_playout_fail_channel(-1);
+ gips_.set_send_fail_channel(-1);
+
+ EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+ EXPECT_TRUE(gips_.GetRecordingMicrophone());
+ EXPECT_TRUE(gips_.GetSend(channel_num));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num));
+}
+
+// Test that we can create a channel configured for multi-point conferences,
+// and start sending/playing out on it.
+// Also verifies that OPT_CONFERENCE switches echo cancellation and noise
+// suppression to their conference modes while sending, and that both revert
+// to defaults once sending stops.
+TEST_F(GipsVoiceEngineTest, ConferenceSendAndPlayout) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ EXPECT_TRUE(channel_->SetOptions(cricket::OPT_CONFERENCE));
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(gips_.GetSend(channel_num));
+
+ bool enabled;
+ GIPS_ECModes ec_mode;
+ GIPS_AESModes aes_mode;
+ int aes_attn;
+ GIPS_NSModes ns_mode;
+ EXPECT_EQ(0, gips_.GIPSVE_GetECStatus(enabled, ec_mode, aes_mode, aes_attn));
+#ifdef CHROMEOS
+ // ChromeOS keeps the platform default EC/NS modes even in conferences.
+ EXPECT_EQ(GIPS_EC_DEFAULT, ec_mode);
+#else
+ EXPECT_EQ(GIPS_EC_CONFERENCE, ec_mode);
+#endif
+ EXPECT_EQ(0, gips_.GIPSVE_GetNSStatus(enabled, ns_mode));
+ EXPECT_TRUE(enabled);
+#ifdef CHROMEOS
+ EXPECT_EQ(GIPS_NS_DEFAULT, ns_mode);
+#else
+ EXPECT_EQ(GIPS_NS_CONFERENCE, ns_mode);
+#endif
+
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ EXPECT_FALSE(gips_.GetSend(channel_num));
+
+ // After send stops, EC/NS should be restored to their default modes.
+ EXPECT_EQ(0, gips_.GIPSVE_GetECStatus(enabled, ec_mode, aes_mode, aes_attn));
+ EXPECT_EQ(GIPS_EC_DEFAULT, ec_mode);
+ EXPECT_EQ(0, gips_.GIPSVE_GetNSStatus(enabled, ns_mode));
+ EXPECT_EQ(GIPS_NS_DEFAULT, ns_mode);
+
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num));
+}
+
+// Test that we can create a channel configured for Codian bridges,
+// and start sending/playing out on it.
+// OPT_AGC_TANDBERG_LEVELS should attenuate the AGC target level while
+// sending and restore it when sending stops.
+TEST_F(GipsVoiceEngineTest, CodianSendAndPlayout) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ GIPS_AGC_config agc_config;
+ EXPECT_EQ(0, gips_.GIPSVE_GetAGCConfig(agc_config));
+ EXPECT_EQ(0, agc_config.targetLeveldBOv);
+ EXPECT_TRUE(channel_->SetOptions(cricket::OPT_AGC_TANDBERG_LEVELS));
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(gips_.GetSend(channel_num));
+ EXPECT_EQ(0, gips_.GIPSVE_GetAGCConfig(agc_config));
+ EXPECT_GT(agc_config.targetLeveldBOv, 0); // level was attenuated
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ EXPECT_FALSE(gips_.GetSend(channel_num));
+ EXPECT_EQ(0, gips_.GIPSVE_GetAGCConfig(agc_config));
+ EXPECT_EQ(0, agc_config.targetLeveldBOv); // level was restored
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num));
+}
+
+// Test that we can set the outgoing SSRC properly.
+TEST_F(GipsVoiceEngineTest, SetSendSsrc) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ unsigned int send_ssrc;
+ // A fresh channel should already have a nonzero (random) local SSRC.
+ EXPECT_EQ(0, gips_.GIPSVE_GetLocalSSRC(channel_num, send_ssrc));
+ EXPECT_NE(0U, send_ssrc);
+ // Setting an explicit SSRC must be reflected in the engine.
+ channel_->SetSendSsrc(0x99);
+ EXPECT_EQ(0, gips_.GIPSVE_GetLocalSSRC(channel_num, send_ssrc));
+ EXPECT_EQ(0x99U, send_ssrc);
+}
+
+// Test that we can properly receive packets.
+TEST_F(GipsVoiceEngineTest, Recv) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ // Deliver one canned PCMU RTP frame and check it reached the channel intact.
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_TRUE(gips_.CheckPacket(channel_num, kPcmuFrame, sizeof(kPcmuFrame)));
+}
+
+// Test that we can add and remove streams, and do proper send/playout.
+// We can receive on multiple streams, but will only send on one.
+TEST_F(GipsVoiceEngineTest, SendAndPlayoutWithMultipleStreams) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num1 = gips_.GetLastChannel();
+
+ // Start playout on the default channel.
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num1));
+
+ // Adding another stream should disable playout on the default channel.
+ EXPECT_TRUE(channel_->AddStream(2));
+ int channel_num2 = gips_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ // Sending stays on the default channel only.
+ EXPECT_TRUE(gips_.GetSend(channel_num1));
+ EXPECT_FALSE(gips_.GetSend(channel_num2));
+
+ // Make sure only the new channel is played out.
+ EXPECT_FALSE(gips_.GetPlayout(channel_num1));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num2));
+
+ // Adding yet another stream should have stream 2 and 3 enabled for playout.
+ EXPECT_TRUE(channel_->AddStream(3));
+ int channel_num3 = gips_.GetLastChannel();
+ EXPECT_FALSE(gips_.GetPlayout(channel_num1));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num2));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num3));
+ EXPECT_FALSE(gips_.GetSend(channel_num3));
+
+ // Stop sending.
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ EXPECT_FALSE(gips_.GetSend(channel_num1));
+ EXPECT_FALSE(gips_.GetSend(channel_num2));
+ EXPECT_FALSE(gips_.GetSend(channel_num3));
+
+ // Stop playout.
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num1));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num2));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num3));
+
+ // Restart playout and make sure the default channel still is not played out.
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_FALSE(gips_.GetPlayout(channel_num1));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num2));
+ EXPECT_TRUE(gips_.GetPlayout(channel_num3));
+
+ // Now remove the new streams and verify that the default channel is
+ // played out again.
+ EXPECT_TRUE(channel_->RemoveStream(3));
+ EXPECT_TRUE(channel_->RemoveStream(2));
+
+ EXPECT_TRUE(gips_.GetPlayout(channel_num1));
+}
+
+// Test that we can set the outgoing SSRC properly with multiple streams.
+// A stream added after SetSendSsrc() must inherit the same local SSRC.
+TEST_F(GipsVoiceEngineTest, SetSendSsrcWithMultipleStreams) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num1 = gips_.GetLastChannel();
+ unsigned int send_ssrc;
+ channel_->SetSendSsrc(0x99);
+ EXPECT_EQ(0, gips_.GIPSVE_GetLocalSSRC(channel_num1, send_ssrc));
+ EXPECT_EQ(0x99U, send_ssrc);
+ EXPECT_TRUE(channel_->AddStream(2));
+ int channel_num2 = gips_.GetLastChannel();
+ EXPECT_EQ(0, gips_.GIPSVE_GetLocalSSRC(channel_num2, send_ssrc));
+ EXPECT_EQ(0x99U, send_ssrc);
+}
+
+// Test that we properly handle failures to add a stream.
+TEST_F(GipsVoiceEngineTest, AddStreamFail) {
+ EXPECT_TRUE(SetupEngine());
+ // Force the fake engine to fail channel creation for the next stream.
+ gips_.set_fail_create_channel(true);
+ EXPECT_FALSE(channel_->AddStream(2));
+}
+
+// Test that we can properly receive packets on multiple streams.
+// Packets are demuxed by SSRC; each must land on exactly one channel,
+// and packets with an unknown SSRC (0 here) on none.
+TEST_F(GipsVoiceEngineTest, RecvWithMultipleStreams) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->AddStream(1));
+ int channel_num1 = gips_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddStream(2));
+ int channel_num2 = gips_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddStream(3));
+ int channel_num3 = gips_.GetLastChannel();
+ // Create packets with the right SSRCs.
+ char packets[4][sizeof(kPcmuFrame)];
+ for (size_t i = 0; i < ARRAY_SIZE(packets); ++i) {
+ memcpy(packets[i], kPcmuFrame, sizeof(kPcmuFrame));
+ // Offset 8 is the SSRC field of the RTP header.
+ talk_base::SetBE32(packets[i] + 8, i);
+ }
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num1));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num2));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num3));
+ DeliverPacket(packets[0], sizeof(packets[0]));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num1));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num2));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num3));
+ DeliverPacket(packets[1], sizeof(packets[1]));
+ EXPECT_TRUE(gips_.CheckPacket(channel_num1, packets[1], sizeof(packets[1])));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num2));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num3));
+ DeliverPacket(packets[2], sizeof(packets[2]));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num1));
+ EXPECT_TRUE(gips_.CheckPacket(channel_num2, packets[2], sizeof(packets[2])));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num3));
+ DeliverPacket(packets[3], sizeof(packets[3]));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num1));
+ EXPECT_TRUE(gips_.CheckNoPacket(channel_num2));
+ EXPECT_TRUE(gips_.CheckPacket(channel_num3, packets[3], sizeof(packets[3])));
+ EXPECT_TRUE(channel_->RemoveStream(3));
+ EXPECT_TRUE(channel_->RemoveStream(2));
+ EXPECT_TRUE(channel_->RemoveStream(1));
+}
+
+// Test that we properly clean up any streams that were added, even if
+// not explicitly removed.
+TEST_F(GipsVoiceEngineTest, StreamCleanup) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->AddStream(1));
+ EXPECT_TRUE(channel_->AddStream(2));
+ EXPECT_EQ(3, gips_.GetNumChannels()); // default channel + 2 added
+ // Deleting the media channel must release all engine channels it created.
+ delete channel_;
+ channel_ = NULL;
+ EXPECT_EQ(0, gips_.GetNumChannels());
+}
+
+// Test that we can send DTMF properly, but only if the other side supports
+// telephone-event.
+TEST_F(GipsVoiceEngineTest, SendDtmf) {
+ EXPECT_TRUE(SetupEngine());
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ // Without telephone-event in the codec list, DTMF must be refused.
+ EXPECT_FALSE(channel_->PressDTMF(1, true));
+ codecs.push_back(kTelephoneEventCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->PressDTMF(1, true));
+}
+
+// Test that we can play a ringback tone properly in a single-stream call.
+TEST_F(GipsVoiceEngineTest, PlayRingback) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = gips_.GetLastChannel();
+ // IsPlayingFileLocally returns 1 while a tone plays, 0 otherwise.
+ EXPECT_EQ(0, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Check we fail if no ringback tone specified.
+ EXPECT_FALSE(channel_->PlayRingbackTone(0, true, true));
+ EXPECT_EQ(0, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Check we can set and play a ringback tone.
+ EXPECT_TRUE(channel_->SetRingbackTone(kRingbackTone, strlen(kRingbackTone)));
+ EXPECT_TRUE(channel_->PlayRingbackTone(0, true, true));
+ EXPECT_EQ(1, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Check we can stop the tone manually.
+ EXPECT_TRUE(channel_->PlayRingbackTone(0, false, false));
+ EXPECT_EQ(0, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Check we stop the tone if a packet arrives.
+ EXPECT_TRUE(channel_->PlayRingbackTone(0, true, true));
+ EXPECT_EQ(1, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_EQ(0, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+}
+
+// Test that we can play a ringback tone properly in a multi-stream call.
+TEST_F(GipsVoiceEngineTest, PlayRingbackWithMultipleStreams) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->AddStream(1));
+ EXPECT_TRUE(channel_->AddStream(2));
+ // The last-added channel corresponds to SSRC 2.
+ int channel_num = gips_.GetLastChannel();
+ EXPECT_EQ(0, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Check we fail if no ringback tone specified.
+ EXPECT_FALSE(channel_->PlayRingbackTone(2, true, true));
+ EXPECT_EQ(0, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Check we can set and play a ringback tone on the correct ssrc.
+ EXPECT_TRUE(channel_->SetRingbackTone(kRingbackTone, strlen(kRingbackTone)));
+ EXPECT_FALSE(channel_->PlayRingbackTone(77, true, true));
+ EXPECT_TRUE(channel_->PlayRingbackTone(2, true, true));
+ EXPECT_EQ(1, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Check we can stop the tone manually.
+ EXPECT_TRUE(channel_->PlayRingbackTone(2, false, false));
+ EXPECT_EQ(0, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Check we stop the tone if a packet arrives, but only with the right SSRC.
+ EXPECT_TRUE(channel_->PlayRingbackTone(2, true, true));
+ EXPECT_EQ(1, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Send a packet with SSRC 1; the tone should not stop.
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_EQ(1, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+ // Send a packet with SSRC 2; the tone should stop.
+ char packet[sizeof(kPcmuFrame)];
+ memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));
+ // Offset 8 is the SSRC field of the RTP header.
+ talk_base::SetBE32(packet + 8, 2);
+ DeliverPacket(packet, sizeof(packet));
+ EXPECT_EQ(0, gips_.GIPSVE_IsPlayingFileLocally(channel_num));
+}
+
+// Tests creating soundclips, and make sure they come from the right engine.
+// Soundclips use the dedicated gips_sc_ engine instance, not gips_.
+TEST_F(GipsVoiceEngineTest, CreateSoundclip) {
+ EXPECT_TRUE(engine_.Init());
+ soundclip_ = engine_.CreateSoundclip();
+ ASSERT_TRUE(soundclip_ != NULL);
+ EXPECT_EQ(0, gips_.GetNumChannels());
+ EXPECT_EQ(1, gips_sc_.GetNumChannels());
+ int channel_num = gips_sc_.GetLastChannel();
+ EXPECT_TRUE(gips_sc_.GetPlayout(channel_num));
+ // Destroying the soundclip must release its engine channel.
+ delete soundclip_;
+ soundclip_ = NULL;
+ EXPECT_EQ(0, gips_sc_.GetNumChannels());
+}
+
+// Tests playing out a fake sound.
+TEST_F(GipsVoiceEngineTest, PlaySoundclip) {
+ // 16000 zero samples: valid but silent audio data for the clip.
+ static const char kZeroes[16000] = {};
+ EXPECT_TRUE(engine_.Init());
+ soundclip_ = engine_.CreateSoundclip();
+ ASSERT_TRUE(soundclip_ != NULL);
+ EXPECT_TRUE(soundclip_->PlaySound(kZeroes, sizeof(kZeroes), 0));
+}
+
+TEST_F(GipsVoiceEngineTest, MediaEngineCallbackOnError) {
+ talk_base::scoped_ptr<ChannelErrorListener> listener;
+ cricket::GipsVoiceMediaChannel* media_channel;
+ unsigned int ssrc = 0;
+
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+
+ media_channel = reinterpret_cast<cricket::GipsVoiceMediaChannel*>(channel_);
+ listener.reset(new ChannelErrorListener(channel_));
+
+ // Test on gips channel.
+ gips_.TriggerCallbackOnError(media_channel->gips_channel(),
+ VE_SATURATION_WARNING);
+ EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_DEVICE_SATURATION,
+ listener->error());
+ EXPECT_NE(-1, gips_.GIPSVE_GetLocalSSRC(gips_.GetLastChannel(), ssrc));
+ EXPECT_EQ(ssrc, listener->ssrc());
+
+ listener->Reset();
+ gips_.TriggerCallbackOnError(-1, VE_TYPING_NOISE_WARNING);
+ EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_TYPING_NOISE_DETECTED,
+ listener->error());
+ EXPECT_EQ(0U, listener->ssrc());
+
+ // Add another stream and test on that.
+ ++ssrc;
+ EXPECT_TRUE(channel_->AddStream(ssrc));
+ listener->Reset();
+ gips_.TriggerCallbackOnError(gips_.GetLastChannel(),
+ VE_SATURATION_WARNING);
+ EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_DEVICE_SATURATION,
+ listener->error());
+ EXPECT_EQ(ssrc, listener->ssrc());
+
+ // Testing a non-existing channel.
+ listener->Reset();
+ gips_.TriggerCallbackOnError(gips_.GetLastChannel() + 2,
+ VE_SATURATION_WARNING);
+ EXPECT_EQ(0, listener->error());
+}
+
+// Tests that SetPlayout(true) reports failure when any of the channels
+// fails to start playout, while SetPlayout(false) still succeeds.
+TEST_F(GipsVoiceEngineTest, TestSetPlayoutError) {
+ EXPECT_TRUE(SetupEngine());
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(channel_->AddStream(2));
+ EXPECT_TRUE(channel_->AddStream(3));
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ // Make the second-to-last channel fail playout from now on.
+ gips_.set_playout_fail_channel(gips_.GetLastChannel() - 1);
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(channel_->SetPlayout(true));
+}
+
+// Test that we can scale the output volume properly for 1:1 calls.
+// SSRC 0 addresses the default (only) stream in a 1:1 call.
+TEST_F(GipsVoiceEngineTest, SetOutputScaling1to1Call) {
+ EXPECT_TRUE(SetupEngine());
+ double left, right;
+
+ // Default is (1.0, 1.0).
+ EXPECT_TRUE(channel_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ // invalid ssrc.
+ EXPECT_FALSE(channel_->GetOutputScaling(3, &left, &right));
+
+ // Set scale to (2.0, 0.5).
+ EXPECT_TRUE(channel_->SetOutputScaling(0, 2.0, 0.5));
+ EXPECT_TRUE(channel_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(2.0, left);
+ EXPECT_DOUBLE_EQ(0.5, right);
+
+ // Set scale to (0, 0), i.e. muted output.
+ EXPECT_TRUE(channel_->SetOutputScaling(0, 0.0, 0.0));
+ EXPECT_TRUE(channel_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(0.0, left);
+ EXPECT_DOUBLE_EQ(0.0, right);
+}
+
+// Test that we can scale the output volume properly for multiway calls.
+// SSRC 0 addresses all streams at once; a specific SSRC addresses only
+// that stream.
+TEST_F(GipsVoiceEngineTest, SetOutputScalingMultiwayCall) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->AddStream(1));
+ EXPECT_TRUE(channel_->AddStream(2));
+ double left, right;
+
+ // Default is (1.0, 1.0).
+ EXPECT_TRUE(channel_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ EXPECT_TRUE(channel_->GetOutputScaling(1, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ EXPECT_TRUE(channel_->GetOutputScaling(2, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ // invalid ssrc.
+ EXPECT_FALSE(channel_->GetOutputScaling(3, &left, &right));
+
+
+ // Set scale to (2.0, 0.5) for ssrc = 1; other streams are unaffected.
+ EXPECT_TRUE(channel_->SetOutputScaling(1, 2.0, 0.5));
+ EXPECT_TRUE(channel_->GetOutputScaling(1, &left, &right));
+ EXPECT_DOUBLE_EQ(2.0, left);
+ EXPECT_DOUBLE_EQ(0.5, right);
+ EXPECT_TRUE(channel_->GetOutputScaling(2, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ EXPECT_TRUE(channel_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+
+ // Set scale to (0, 0) for all ssrcs.
+ EXPECT_TRUE(channel_->SetOutputScaling(0, 0.0, 0.0));
+ EXPECT_TRUE(channel_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(0.0, left);
+ EXPECT_DOUBLE_EQ(0.0, right);
+ EXPECT_TRUE(channel_->GetOutputScaling(1, &left, &right));
+ EXPECT_DOUBLE_EQ(0.0, left);
+ EXPECT_DOUBLE_EQ(0.0, right);
+ EXPECT_TRUE(channel_->GetOutputScaling(2, &left, &right));
+ EXPECT_DOUBLE_EQ(0.0, left);
+ EXPECT_DOUBLE_EQ(0.0, right);
+}
+
+// Tests for the actual GIPS library.
+// These TEST()s (not TEST_F) exercise the real engine, not the fake.
+
+// Tests that the library initializes and shuts down properly.
+TEST(GipsVoiceEngineLibTest, StartupShutdown) {
+ cricket::GipsVoiceEngine engine;
+ EXPECT_TRUE(engine.Init());
+ cricket::VoiceMediaChannel* channel = engine.CreateChannel();
+ EXPECT_TRUE(channel != NULL);
+ delete channel;
+ engine.Terminate();
+
+ // Reinit to catch regression where GIPSVoiceEngineObserver reference is lost
+ EXPECT_TRUE(engine.Init());
+ engine.Terminate();
+}
+
+// Tests that the logging from the library is cleartext.
+// TODO(zhurunz): This test case is disabled due to a known bug in GIPS4.0
+// which sends out truncated log message. Will be fixed in next GIPS release.
+TEST(GipsVoiceEngineLibTest, DISABLED_HasUnencryptedLogging) {
+ cricket::GipsVoiceEngine engine;
+ talk_base::scoped_ptr<talk_base::MemoryStream> stream(
+ new talk_base::MemoryStream);
+ size_t size = 0;
+ bool cleartext = true;
+ // Capture all verbose logging into an in-memory stream.
+ talk_base::LogMessage::AddLogToStream(stream.get(), talk_base::LS_VERBOSE);
+ engine.SetLogging(talk_base::LS_VERBOSE, "");
+ EXPECT_TRUE(engine.Init());
+ EXPECT_TRUE(stream->GetSize(&size));
+ EXPECT_GT(size, 0U);
+ engine.Terminate();
+ talk_base::LogMessage::RemoveLogToStream(stream.get());
+ // Every captured byte must be printable or whitespace ASCII.
+ const char* buf = stream->GetBuffer();
+ for (size_t i = 0; i < size && cleartext; ++i) {
+ int ch = static_cast<int>(buf[i]);
+ ASSERT_GE(ch, 0) << "Out of bounds character in gips log: "
+ << std::hex << ch;
+ cleartext = (isprint(ch) || isspace(ch));
+ }
+ EXPECT_TRUE(cleartext);
+}
+
+// Tests we do not see any references to a monitor thread being spun up
+// when initiating the engine.
+TEST(GipsVoiceEngineLibTest, HasNoMonitorThread) {
+ cricket::GipsVoiceEngine engine;
+ talk_base::scoped_ptr<talk_base::MemoryStream> stream(
+ new talk_base::MemoryStream);
+ talk_base::LogMessage::AddLogToStream(stream.get(), talk_base::LS_VERBOSE);
+ engine.SetLogging(talk_base::LS_VERBOSE, "");
+ EXPECT_TRUE(engine.Init());
+ engine.Terminate();
+ talk_base::LogMessage::RemoveLogToStream(stream.get());
+
+ // Inspect the captured log: the process thread must appear, but the
+ // monitor thread must not.
+ size_t size = 0;
+ EXPECT_TRUE(stream->GetSize(&size));
+ EXPECT_GT(size, 0U);
+ const std::string logs(stream->GetBuffer());
+ EXPECT_NE(std::string::npos, logs.find("GIPSModuleProcessThread"));
+#ifdef WIN32
+ // TODO(whyuan): Enable this test with new gips lib for linux and Mac.
+ EXPECT_EQ(std::string::npos, logs.find("GIPS_monitor_thread"));
+#endif // WIN32
+}
+
+// Tests that the library is configured with the codecs we want.
+TEST(GipsVoiceEngineLibTest, HasCorrectCodecs) {
+ cricket::GipsVoiceEngine engine;
+ // Check codecs by name.
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "ISAC", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "ISAC", 32000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "ISACLC", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "iLBC", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "IPCMWB", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "EG711U", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "EG711A", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "PCMU", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "PCMA", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "speex", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "speex", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "G722", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "GSM", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "red", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "CN", 32000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "CN", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "CN", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "telephone-event", 8000, 0, 1, 0)));
+ // Check codecs with an id by id (static payload types, empty name).
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(0, "", 8000, 0, 1, 0))); // PCMU
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(8, "", 8000, 0, 1, 0))); // PCMA
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(9, "", 16000, 0, 1, 0))); // G722
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(3, "", 8000, 0, 1, 0))); // GSMFR
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(13, "", 8000, 0, 1, 0))); // CN
+ // Check sample/bitrate matching.
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(0, "PCMU", 8000, 64000, 1, 0)));
+ // Check that bad codecs fail.
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(99, "ABCD", 0, 0, 1, 0)));
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(88, "", 0, 0, 1, 0)));
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 0, 0, 2, 0)));
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 5000, 0, 1, 0)));
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 0, 5000, 1, 0)));
+ // Check that there aren't any extra codecs lying around.
+ EXPECT_EQ(18U, engine.codecs().size());
+ // Verify the payload id of common audio codecs, including CN, ISAC, and G722.
+ // TODO(whyuan): GIPS may change the payload id.
+ for (std::vector<cricket::AudioCodec>::const_iterator it =
+ engine.codecs().begin(); it != engine.codecs().end(); ++it) {
+ if (it->name == "CN" && it->clockrate == 16000) {
+ EXPECT_EQ(105, it->id);
+ } else if (it->name == "CN" && it->clockrate == 32000) {
+ EXPECT_EQ(126, it->id);
+ } else if (it->name == "ISAC" && it->clockrate == 16000) {
+ EXPECT_EQ(103, it->id);
+ } else if (it->name == "ISAC" && it->clockrate == 32000) {
+ EXPECT_EQ(104, it->id);
+ } else if (it->name == "G722" && it->clockrate == 16000) {
+ EXPECT_EQ(9, it->id);
+ }
+ }
+
+ engine.Terminate();
+}
+
+// Tests that the list of supported codecs is created properly and ordered
+// correctly
+TEST_F(GipsVoiceEngineTest, CodecPreference) {
+ cricket::GipsVoiceEngine engine;
+ const std::vector<cricket::AudioCodec>& codecs = engine.codecs();
+ ASSERT_FALSE(codecs.empty());
+ // ISAC 16kHz/32kbps must be the top preference.
+ EXPECT_EQ("ISAC", codecs[0].name);
+ EXPECT_EQ(16000, codecs[0].clockrate);
+ EXPECT_EQ(32000, codecs[0].bitrate);
+ // Preferences must be strictly decreasing down the list.
+ int pref = codecs[0].preference;
+ for (size_t i = 1; i < codecs.size(); ++i) {
+ EXPECT_GT(pref, codecs[i].preference);
+ pref = codecs[i].preference;
+ }
+}
+
+// Tests that the engine can create exactly 32 concurrent channels and
+// that they can all be torn down cleanly.
+TEST(GipsVoiceEngineLibTest, Has32Channels) {
+ cricket::GipsVoiceEngine engine;
+ EXPECT_TRUE(engine.Init());
+
+ cricket::VoiceMediaChannel* channels[32];
+ int num_channels = 0;
+
+ // Cast ARRAY_SIZE (size_t) to int to avoid a signed/unsigned comparison
+ // warning against the int counter.
+ while (num_channels < static_cast<int>(ARRAY_SIZE(channels))) {
+ cricket::VoiceMediaChannel* channel = engine.CreateChannel();
+ if (!channel)
+ break;
+
+ channels[num_channels++] = channel;
+ }
+
+ // We expect channel creation to succeed for the full array, no earlier
+ // failure.
+ int expected = static_cast<int>(ARRAY_SIZE(channels));
+ EXPECT_EQ(expected, num_channels);
+
+ while (num_channels > 0) {
+ delete channels[--num_channels];
+ }
+
+ engine.Terminate();
+}
+
+#ifdef WIN32
+// Test our workarounds to gips' munging of the coinit count
+TEST(GipsVoiceEngineLibTest, CoInitialize) {
+ cricket::GipsVoiceEngine* engine = new cricket::GipsVoiceEngine();
+
+ // Initial refcount should be 0 (S_OK means COM was newly initialized).
+ EXPECT_EQ(S_OK, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+
+ // Engine should start even with COM already inited.
+ EXPECT_TRUE(engine->Init());
+ engine->Terminate();
+ EXPECT_TRUE(engine->Init());
+ engine->Terminate();
+
+ // Refcount after terminate should be 1 (in reality 3); test if it is nonzero.
+ // S_FALSE means COM was already initialized on this thread.
+ EXPECT_EQ(S_FALSE, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+ // Decrement refcount to (hopefully) 0.
+ CoUninitialize();
+ CoUninitialize();
+ delete engine;
+
+ // Ensure refcount is 0.
+ EXPECT_EQ(S_OK, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+ CoUninitialize();
+}
+#endif