Add unit tests and MUC room config.

git-svn-id: http://libjingle.googlecode.com/svn/trunk@81 dd674b97-3498-5ee5-1854-bdd07cd0ff33
diff --git a/CHANGELOG b/CHANGELOG
index 81b3f09..96ab6f4 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -2,6 +2,7 @@
 
 0.5.9 - Aug 31, 2011
   - Add app/webrtc
+  - Add webrtcvoiceengine/webrtcvideoengine
   - Update STUN support some more (RFC 5389)
   - Add video output scaling
   - Refactoring and bug fixes
diff --git a/README b/README
index ccf22ea..f40d4ea 100644
--- a/README
+++ b/README
@@ -56,6 +56,13 @@
     set PATH_TO_SWTOOLKIT=c:\src\swtoolkit
     set PATH=%PATH_TO_SWTOOLKIT%;%PATH%
 
+  * Finally, download and install the unit test framework from
+    http://code.google.com/p/googletest/downloads/list
+    To install it, just unzip the package, put the resulting folder under the
+    /talk/third_party/ directory, and rename it from "gtest-x.x.x" to "gtest",
+    so that your folder structure looks like:
+    /talk/third_party/gtest/...
+
 2.2 libjingle
 
 Libjingle needs to be downloaded and patched
diff --git a/talk/app/webrtc/webrtc.scons b/talk/app/webrtc/webrtc.scons
index 66f73af..2394e69 100644
--- a/talk/app/webrtc/webrtc.scons
+++ b/talk/app/webrtc/webrtc.scons
@@ -21,6 +21,8 @@
   env,
   name = 'webrtc',
   srcs = [
+    'peerconnection_unittest.cc',
+    'unittest_utilities.cc',
     'webrtcsession_unittest.cc',
   ],
   libs = [
diff --git a/talk/app/webrtc/webrtcsession_unittest.cc b/talk/app/webrtc/webrtcsession_unittest.cc
index 72610b9..648a5d6 100644
--- a/talk/app/webrtc/webrtcsession_unittest.cc
+++ b/talk/app/webrtc/webrtcsession_unittest.cc
@@ -31,6 +31,7 @@
 
 #include "base/gunit.h"
 #include "base/helpers.h"
+#include "talk/app/webrtc/unittest_utilities.h"
 #include "talk/app/webrtc/webrtcsession.h"
 #include "talk/base/fakenetwork.h"
 #include "talk/base/scoped_ptr.h"
@@ -41,131 +42,6 @@
 #include "talk/session/phone/fakesession.h"
 #include "talk/session/phone/mediasessionclient.h"
 
-namespace {
-cricket::VideoContentDescription* CopyVideoContentDescription(
-    const cricket::VideoContentDescription* video_description) {
-  cricket::VideoContentDescription* new_video_description =
-      new cricket::VideoContentDescription();
-  cricket::VideoCodecs::const_iterator iter =
-      video_description->codecs().begin();
-  for (; iter != video_description->codecs().end(); iter++) {
-    new_video_description->AddCodec(*iter);
-  }
-  new_video_description->SortCodecs();
-  return new_video_description;
-}
-
-cricket::AudioContentDescription* CopyAudioContentDescription(
-    const cricket::AudioContentDescription* audio_description) {
-  cricket::AudioContentDescription* new_audio_description =
-      new cricket::AudioContentDescription();
-  cricket::AudioCodecs::const_iterator iter =
-      audio_description->codecs().begin();
-  for (; iter != audio_description->codecs().end(); iter++) {
-    new_audio_description->AddCodec(*iter);
-  }
-  new_audio_description->SortCodecs();
-  return new_audio_description;
-}
-
-const cricket::ContentDescription* CopyContentDescription(
-    const cricket::ContentDescription* original) {
-  const cricket::MediaContentDescription* media =
-      static_cast<const cricket::MediaContentDescription*>(original);
-  const cricket::ContentDescription* new_content_description = NULL;
-  if (media->type() == cricket::MEDIA_TYPE_VIDEO) {
-    const cricket::VideoContentDescription* video_description =
-        static_cast<const cricket::VideoContentDescription*>(original);
-    new_content_description = static_cast<const cricket::ContentDescription*>
-        (CopyVideoContentDescription(video_description));
-  } else if (media->type() == cricket::MEDIA_TYPE_AUDIO) {
-    const cricket::AudioContentDescription* audio_description =
-        static_cast<const cricket::AudioContentDescription*>(original);
-    new_content_description = static_cast<const cricket::ContentDescription*>
-        (CopyAudioContentDescription(audio_description));
-  } else {
-    return NULL;
-  }
-  return new_content_description;
-}
-
-cricket::ContentInfos CopyContentInfos(const cricket::ContentInfos& original) {
-  cricket::ContentInfos new_content_infos;
-  for (cricket::ContentInfos::const_iterator iter = original.begin();
-       iter != original.end(); iter++) {
-    cricket::ContentInfo info;
-    info.name = (*iter).name;
-    info.type = (*iter).type;
-    info.description = CopyContentDescription((*iter).description);
-    new_content_infos.push_back(info);
-  }
-  return new_content_infos;
-}
-
-cricket::SessionDescription* CopySessionDescription(
-    const cricket::SessionDescription* original) {
-  const cricket::ContentInfos& content_infos = original->contents();
-  cricket::ContentInfos new_content_infos = CopyContentInfos(content_infos);
-  return new cricket::SessionDescription(new_content_infos);
-}
-
-cricket::SessionDescription* GenerateFakeSessionDescription(bool video) {
-  cricket::SessionDescription* fake_description =
-      new cricket::SessionDescription();
-  const std::string name = video ? std::string(cricket::CN_VIDEO) :
-                                   std::string(cricket::CN_AUDIO);
-  cricket::ContentDescription* description = NULL;
-  if (video) {
-    cricket::VideoContentDescription* video_dsc =
-        new cricket::VideoContentDescription;
-    video_dsc->SortCodecs();
-    description = static_cast<cricket::ContentDescription*>(video_dsc);
-  } else {
-    cricket::AudioContentDescription* audio_dsc =
-        new cricket::AudioContentDescription();
-    audio_dsc->SortCodecs();
-    description = static_cast<cricket::ContentDescription*>(audio_dsc);
-  }
-
-  // Cannot fail.
-  fake_description->AddContent(name, cricket::NS_JINGLE_RTP, description);
-  return fake_description;
-}
-
-void GenerateFakeCandidate(std::vector<cricket::Candidate>* candidates,
-                           bool video) {
-  // Next add a candidate.
-  // int port_index = 0;
-  std::string port_index_as_string("0");
-
-  cricket::Candidate candidate;
-  candidate.set_name("rtp");
-  candidate.set_protocol("udp");
-  talk_base::SocketAddress address("127.0.0.1", 1234);
-  candidate.set_address(address);
-  candidate.set_preference(1);
-  candidate.set_username("username" + port_index_as_string);
-  candidate.set_password(port_index_as_string);
-  candidate.set_type("local");
-  candidate.set_network_name("network");
-  candidate.set_generation(0);
-
-  candidates->push_back(candidate);
-}
-
-cricket::SessionDescription* GenerateFakeSession(
-    std::vector<cricket::Candidate>* candidates,
-    bool video) {
-  cricket::SessionDescription* fake_description =
-      GenerateFakeSessionDescription(video);
-  if (fake_description == NULL) {
-    return NULL;
-  }
-  GenerateFakeCandidate(candidates, video);
-  return fake_description;
-}
-}  // namespace
-
 class WebRtcSessionTest
     : public sigslot::has_slots<>,
       public testing::Test {
@@ -219,9 +95,7 @@
       const std::vector<cricket::Candidate>& candidates) {
     callback_ids_.push_back(kOnLocalDescription);
     last_description_ptr_.reset(CopySessionDescription(desc));
-    last_candidates_.clear();
-    last_candidates_.insert(last_candidates_.end(), candidates.begin(),
-                            candidates.end());
+    CopyCandidates(candidates, &last_candidates_);
   }
   cricket::SessionDescription* GetLocalDescription(
       std::vector<cricket::Candidate>* candidates) {
@@ -231,8 +105,7 @@
     if (!last_description_ptr_.get()) {
       return NULL;
     }
-    candidates->insert(candidates->end(), last_candidates_.begin(),
-                       last_candidates_.end());
+    CopyCandidates(last_candidates_, candidates);
     return CopySessionDescription(last_description_ptr_.get());
   }
 
@@ -315,7 +188,6 @@
     session_->SignalLocalDescription.connect(this,
         &WebRtcSessionTest::OnLocalDescription);
     session_->SignalFailedCall.connect(this, &WebRtcSessionTest::OnFailedCall);
-
     return true;
   }
 
@@ -512,14 +384,14 @@
 
   std::vector<cricket::Candidate> candidates;
   cricket::SessionDescription* local_session =
-      GenerateFakeSession(&candidates, video);
+      GenerateFakeSession(video, &candidates);
   ASSERT_FALSE(candidates.empty());
   ASSERT_FALSE(local_session == NULL);
-  // TODO: Figure out why the TransportChannel is not created.
-  // if (!CallOnInitiateMessage(local_session, candidates)) {
-  //    delete local_session;
-  //    FAIL();
-  // }
+  ASSERT_TRUE(CallInitiate());
+  if (!CallOnInitiateMessage(local_session, candidates)) {
+    delete local_session;
+    FAIL();
+  }
   ASSERT_TRUE(CallConnect());
   ASSERT_FALSE(CallbackReceived(this, 1000));
 
@@ -535,14 +407,14 @@
 
   std::vector<cricket::Candidate> candidates;
   cricket::SessionDescription* local_session =
-      GenerateFakeSession(&candidates, video);
+      GenerateFakeSession(video, &candidates);
   ASSERT_FALSE(candidates.empty());
   ASSERT_FALSE(local_session == NULL);
-  // TODO: Figure out why the TransportChannel is not created.
-  // if (!CallOnInitiateMessage(local_session, candidates)) {
-  //     delete local_session;
-  //     FAIL();
-  // }
+  ASSERT_TRUE(CallInitiate());
+  if (!CallOnInitiateMessage(local_session, candidates)) {
+    delete local_session;
+    FAIL();
+  }
   ASSERT_TRUE(CallConnect());
   ASSERT_FALSE(CallbackReceived(this, 1000));
   ASSERT_TRUE(!CallHasAudioChannel() &&
diff --git a/talk/base/gunit.h b/talk/base/gunit.h
new file mode 100644
index 0000000..9d04595
--- /dev/null
+++ b/talk/base/gunit.h
@@ -0,0 +1,129 @@
+/*
+ * libjingle
+ * Copyright 2004--2008, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_BASE_GUNIT_H_
+#define TALK_BASE_GUNIT_H_
+
+#include "talk/base/logging.h"
+#include "talk/base/thread.h"
+#ifdef ANDROID
+#include <gtest/gtest.h>
+#elif LIBJINGLE_UNITTEST
+#include "third_party/gtest/include/gtest/gtest.h"
+#else
+#include "testing/base/public/gunit.h"
+#endif
+
+// forward declarations
+namespace talk_base {
+class Pathname;
+}
+
+// Wait until "ex" is true, or "timeout" expires.
+#define WAIT(ex, timeout) \
+  for (uint32 start = talk_base::Time(); \
+      !(ex) && talk_base::Time() < start + timeout;) \
+    talk_base::Thread::Current()->ProcessMessages(1);
+
+// This returns the result of the test in res, so that we don't re-evaluate
+// the expression in the XXXX_WAIT macros below, since that causes problems
+// when the expression is only true the first time you check it.
+#define WAIT_(ex, timeout, res) \
+  do { \
+    uint32 start = talk_base::Time(); \
+    res = (ex); \
+    while (!res && talk_base::Time() < start + timeout) { \
+      talk_base::Thread::Current()->ProcessMessages(1); \
+      res = (ex); \
+    } \
+  } while (0);
+
+// The typical EXPECT_XXXX and ASSERT_XXXXs, but done until true or a timeout.
+#define EXPECT_TRUE_WAIT(ex, timeout) \
+  do { \
+    bool res; \
+    WAIT_(ex, timeout, res); \
+    if (!res) EXPECT_TRUE(ex); \
+  } while (0);
+
+#define EXPECT_EQ_WAIT(v1, v2, timeout) \
+  do { \
+    bool res; \
+    WAIT_(v1 == v2, timeout, res); \
+    if (!res) EXPECT_EQ(v1, v2); \
+  } while (0);
+
+#define ASSERT_TRUE_WAIT(ex, timeout) \
+  do { \
+    bool res; \
+    WAIT_(ex, timeout, res); \
+    if (!res) ASSERT_TRUE(ex); \
+  } while (0);
+
+#define ASSERT_EQ_WAIT(v1, v2, timeout) \
+  do { \
+    bool res; \
+    WAIT_(v1 == v2, timeout, res); \
+    if (!res) ASSERT_EQ(v1, v2); \
+  } while (0);
+
+// Version with a "soft" timeout and a margin. This logs if the timeout is
+// exceeded, but it only fails if the expression still isn't true after the
+// margin time passes.
+#define EXPECT_TRUE_WAIT_MARGIN(ex, timeout, margin) \
+  do { \
+    bool res; \
+    WAIT_(ex, timeout, res); \
+    if (res) { \
+      break; \
+    } \
+    LOG(LS_WARNING) << "Expression " << #ex << " still not true after " << \
+        timeout << "ms; waiting an additional " << margin << "ms"; \
+    WAIT_(ex, margin, res); \
+    if (!res) { \
+      EXPECT_TRUE(ex); \
+    } \
+  } while (0);
+
+#ifdef __LP64__
+// When compiling in 64-bit mode we redefine the TEST and TEST_F macros to
+// append _64bit to all testsuite names to distinguish the test results from
+// those for the 32-bit cross-built binaries.
+#undef TEST
+#undef TEST_F
+// Copied and adjusted from google3/third_party/gtest/include/gtest/gtest.h
+#define TEST(test_case_name, test_name)\
+  GTEST_TEST_(test_case_name##_64bit, test_name, \
+              ::testing::Test, ::testing::internal::GetTestTypeId())
+#define TEST_F(test_fixture, test_name)\
+  GTEST_TEST_(test_fixture##_64bit, test_name, test_fixture, \
+              ::testing::internal::GetTypeId<test_fixture>())
+#endif
+
+talk_base::Pathname GetTalkDirectory();
+
+#endif  // TALK_BASE_GUNIT_H_
diff --git a/talk/base/network_unittest.cc b/talk/base/network_unittest.cc
new file mode 100644
index 0000000..6a9919d
--- /dev/null
+++ b/talk/base/network_unittest.cc
@@ -0,0 +1,144 @@
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+
+
+#include <vector>
+#include "talk/base/gunit.h"
+#include "talk/base/network.h"
+
+namespace talk_base {
+
+// A network that should not be ignored.
+static const Network kNetwork1("test1", "Test Network Adapter 1",
+                               0x12345678, 0x12345601);
+// A network that should be ignored (IP is 0.1.0.4).
+static const Network kNetwork2("test2", "Test Network Adapter 2",
+                               0x00010004, 0x01000000);
+// A network that should not be ignored (IP is valid, but gateway is 1.0.0.0).
+// Previously, we attempted to ignore networks with no default gateway,
+// but if an explicit route is set, no default gateway is needed.
+static const Network kNetwork3("test3", "Test Network Adapter 3",
+                               0x55667788, 0x01000000);
+
+class NetworkTest : public testing::Test, public sigslot::has_slots<>  {
+ public:
+  NetworkTest()
+      : callback_called_(false) {
+  }
+
+  void OnNetworksChanged() {
+    callback_called_ = true;
+  }
+
+  void MergeNetworkList(BasicNetworkManager& network_manager,
+                        const NetworkManager::NetworkList& list,
+                        bool force_notification) {
+    network_manager.MergeNetworkList(list, force_notification);
+  }
+
+  bool IsIgnoredNetwork(const Network& network) {
+    return BasicNetworkManager::IsIgnoredNetwork(network);
+  }
+
+ protected:
+  bool callback_called_;
+};
+
+// Test that the Network ctor works properly.
+TEST_F(NetworkTest, TestNetworkConstruct) {
+  EXPECT_EQ("test1", kNetwork1.name());
+  EXPECT_EQ("Test Network Adapter 1", kNetwork1.description());
+  EXPECT_EQ(0x12345678U, kNetwork1.ip());
+  EXPECT_EQ(0x12345601U, kNetwork1.gateway_ip());
+  EXPECT_FALSE(kNetwork1.ignored());
+}
+
+// Tests that our ignore function works properly.
+TEST_F(NetworkTest, TestNetworkIgnore) {
+  EXPECT_FALSE(IsIgnoredNetwork(kNetwork1));
+  EXPECT_TRUE(IsIgnoredNetwork(kNetwork2));
+  EXPECT_FALSE(IsIgnoredNetwork(kNetwork3));
+}
+
+// Test that UpdateNetworks succeeds.
+TEST_F(NetworkTest, TestUpdateNetworks) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(
+      static_cast<NetworkTest*>(this), &NetworkTest::OnNetworksChanged);
+  manager.StartUpdating();
+  Thread::Current()->ProcessMessages(0);
+  EXPECT_TRUE(callback_called_);
+}
+
+// Verify that MergeNetworkList() merges network lists properly.
+TEST_F(NetworkTest, TestMergeNetworkList) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(
+      static_cast<NetworkTest*>(this), &NetworkTest::OnNetworksChanged);
+
+  // Add kNetwork1 to the list of networks.
+  NetworkManager::NetworkList list;
+  list.push_back(new Network(kNetwork1));
+  callback_called_ = false;
+  MergeNetworkList(manager, list, false);
+  EXPECT_TRUE(callback_called_);
+  list.clear();
+
+  manager.GetNetworks(&list);
+  EXPECT_EQ(1U, list.size());
+  EXPECT_EQ(kNetwork1.ToString(), list[0]->ToString());
+  Network* net1 = list[0];
+  list.clear();
+
+  // Replace kNetwork1 with kNetwork2.
+  list.push_back(new Network(kNetwork2));
+  callback_called_ = false;
+  MergeNetworkList(manager, list, false);
+  EXPECT_TRUE(callback_called_);
+  list.clear();
+
+  manager.GetNetworks(&list);
+  EXPECT_EQ(1U, list.size());
+  EXPECT_EQ(kNetwork2.ToString(), list[0]->ToString());
+  Network* net2 = list[0];
+  list.clear();
+
+  // Add kNetwork1 back (kNetwork2 is already present).
+  list.push_back(new Network(kNetwork1));
+  list.push_back(new Network(kNetwork2));
+  callback_called_ = false;
+  MergeNetworkList(manager, list, false);
+  EXPECT_TRUE(callback_called_);
+  list.clear();
+
+  // Verify that we get previous instances of Network objects.
+  manager.GetNetworks(&list);
+  EXPECT_EQ(2U, list.size());
+  EXPECT_TRUE((net1 == list[0] && net2 == list[1]) ||
+              (net1 == list[1] && net2 == list[0]));
+  list.clear();
+
+  // Call MergeNetworkList() again and verify that we don't get an update
+  // notification.
+  list.push_back(new Network(kNetwork2));
+  list.push_back(new Network(kNetwork1));
+  callback_called_ = false;
+  MergeNetworkList(manager, list, false);
+  EXPECT_FALSE(callback_called_);
+  list.clear();
+
+  // Verify that we get previous instances of Network objects.
+  manager.GetNetworks(&list);
+  EXPECT_EQ(2U, list.size());
+  EXPECT_TRUE((net1 == list[0] && net2 == list[1]) ||
+              (net1 == list[1] && net2 == list[0]));
+  list.clear();
+}
+
+// Test that DumpNetworks works.
+TEST_F(NetworkTest, TestDumpNetworks) {
+  BasicNetworkManager::DumpNetworks(true);
+}
+
+}  // namespace talk_base
diff --git a/talk/base/unittest_main.cc b/talk/base/unittest_main.cc
new file mode 100644
index 0000000..56fbc54
--- /dev/null
+++ b/talk/base/unittest_main.cc
@@ -0,0 +1,119 @@
+// Copyright 2007 Google Inc. All Rights Reserved.
+
+//         juberti@google.com (Justin Uberti)
+//
+// A reusable entry point for gunit tests.
+
+#ifdef WIN32
+#include <crtdbg.h>
+#endif
+
+#include "talk/base/flags.h"
+#include "talk/base/fileutils.h"
+#include "talk/base/gunit.h"
+#include "talk/base/logging.h"
+#include "talk/base/pathutils.h"
+
+DEFINE_bool(help, false, "prints this message");
+DEFINE_string(log, "", "logging options to use");
+#ifdef WIN32
+DEFINE_int(crt_break_alloc, -1, "memory allocation to break on");
+DEFINE_bool(default_error_handlers, false,
+            "leave the default exception/dbg handler functions in place");
+
+void TestInvalidParameterHandler(const wchar_t* expression,
+                                 const wchar_t* function,
+                                 const wchar_t* file,
+                                 unsigned int line,
+                                 uintptr_t pReserved) {
+  LOG(LS_ERROR) << "InvalidParameter Handler called.  Exiting.";
+  LOG(LS_ERROR) << expression << std::endl << function << std::endl << file
+                << std::endl << line;
+  exit(1);
+}
+void TestPureCallHandler() {
+  LOG(LS_ERROR) << "Purecall Handler called.  Exiting.";
+  exit(1);
+}
+int TestCrtReportHandler(int report_type, char* msg, int* retval) {
+    LOG(LS_ERROR) << "CrtReport Handler called...";
+    LOG(LS_ERROR) << msg;
+  if (report_type == _CRT_ASSERT) {
+    exit(1);
+  } else {
+    *retval = 0;
+    return TRUE;
+  }
+}
+#endif  // WIN32
+
+talk_base::Pathname GetTalkDirectory() {
+  // Locate talk directory.
+  talk_base::Pathname path = talk_base::Filesystem::GetCurrentDirectory();
+  std::string talk_folder_name("talk");
+  talk_folder_name += path.folder_delimiter();
+  while (path.folder_name() != talk_folder_name && !path.empty()) {
+    path.SetFolder(path.parent_folder());
+  }
+
+  // If not running inside the "talk" folder, then assume running in its
+  // parent folder.
+  if (path.empty()) {
+    path = talk_base::Filesystem::GetCurrentDirectory();
+    path.AppendFolder("talk");
+    // Make sure the folder exists.
+    if (!talk_base::Filesystem::IsFolder(path)) {
+      path.clear();
+    }
+  }
+  return path;
+}
+
+
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  FlagList::SetFlagsFromCommandLine(&argc, argv, false);
+  if (FLAG_help) {
+    FlagList::Print(NULL, false);
+    return 0;
+  }
+
+#ifdef WIN32
+  if (!FLAG_default_error_handlers) {
+    // Make sure any errors don't throw dialogs hanging the test run.
+    _set_invalid_parameter_handler(TestInvalidParameterHandler);
+    _set_purecall_handler(TestPureCallHandler);
+    _CrtSetReportHook2(_CRT_RPTHOOK_INSTALL, TestCrtReportHandler);
+  }
+
+#ifdef _DEBUG  // Turn on memory leak checking on Windows.
+  _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF |_CRTDBG_LEAK_CHECK_DF);
+  if (FLAG_crt_break_alloc >= 0) {
+    _crtBreakAlloc = FLAG_crt_break_alloc;
+  }
+#endif  // _DEBUG
+#endif  // WIN32
+
+  talk_base::Filesystem::SetOrganizationName("google");
+  talk_base::Filesystem::SetApplicationName("unittest");
+
+  // By default, log timestamps. Allow overrides by use of a --log flag.
+  talk_base::LogMessage::LogTimestamps();
+  if (*FLAG_log != '\0') {
+    talk_base::LogMessage::ConfigureLogging(FLAG_log, "unittest.log");
+  }
+
+  int res = RUN_ALL_TESTS();
+
+  // clean up logging so we don't appear to leak memory.
+  talk_base::LogMessage::ConfigureLogging("", "");
+
+#ifdef WIN32
+  // Unhook crt function so that we don't ever log after statics have been
+  // uninitialized.
+  if (!FLAG_default_error_handlers)
+    _CrtSetReportHook2(_CRT_RPTHOOK_REMOVE, TestCrtReportHandler);
+#endif
+
+  return res;
+}
diff --git a/talk/examples/call/callclient.cc b/talk/examples/call/callclient.cc
index 8b9a834..fca401e 100644
--- a/talk/examples/call/callclient.cc
+++ b/talk/examples/call/callclient.cc
@@ -54,6 +54,7 @@
 #include "talk/session/phone/mediasessionclient.h"
 #include "talk/session/phone/videorendererfactory.h"
 #include "talk/xmpp/constants.h"
+#include "talk/xmpp/mucroomconfigtask.h"
 #include "talk/xmpp/mucroomlookuptask.h"
 
 namespace {
@@ -696,7 +697,8 @@
   }
 
   buzz::MucRoomLookupTask* lookup_query_task =
-      new buzz::MucRoomLookupTask(xmpp_client_, room, domain);
+      new buzz::MucRoomLookupTask(
+          xmpp_client_, buzz::JID_GOOGLE_MUC_LOOKUP, room, domain);
   lookup_query_task->SignalResult.connect(this,
       &CallClient::OnRoomLookupResponse);
   lookup_query_task->SignalError.connect(this,
@@ -739,11 +741,35 @@
   presence_out_->SendDirected(muc->local_jid(), my_status_);
 }
 
-void CallClient::OnRoomLookupResponse(const buzz::MucRoomInfo& room_info) {
-  JoinMuc(room_info.room_jid);
+void CallClient::OnRoomLookupResponse(buzz::MucRoomLookupTask* task,
+                                      const buzz::MucRoomInfo& room) {
+  // The server requires the room be "configured" before being used.
+  // We only need to configure it if we create it, but rooms are
+  // auto-created at lookup, so there's currently no way to know if we
+  // created it.  So, we configure it every time, just in case.
+  // Luckily, it appears to be safe to configure a room that's already
+  // configured.  Our current flow is:
+  // 1. Lookup/auto-create
+  // 2. Configure
+  // 3. Join
+  // TODO: In the future, once the server supports it, we
+  // should:
+  // 1. Lookup
+  // 2. Create and Configure if necessary
+  // 3. Join
+  std::vector<std::string> room_features;
+  room_features.push_back(buzz::STR_MUC_ROOM_FEATURE_ENTERPRISE);
+  buzz::MucRoomConfigTask* room_config_task = new buzz::MucRoomConfigTask(
+      xmpp_client_, room.jid, room.full_name(), room_features);
+  room_config_task->SignalResult.connect(this,
+      &CallClient::OnRoomConfigResult);
+  room_config_task->SignalError.connect(this,
+      &CallClient::OnRoomConfigError);
+  room_config_task->Start();
 }
 
-void CallClient::OnRoomLookupError(const buzz::XmlElement* stanza) {
+void CallClient::OnRoomLookupError(buzz::IqTask* task,
+                                   const buzz::XmlElement* stanza) {
   if (stanza == NULL) {
     console_->PrintLine("Room lookup failed.");
   } else {
@@ -751,6 +777,19 @@
   }
 }
 
+void CallClient::OnRoomConfigResult(buzz::MucRoomConfigTask* task) {
+  JoinMuc(task->room_jid());
+}
+
+void CallClient::OnRoomConfigError(buzz::IqTask* task,
+                                   const buzz::XmlElement* stanza) {
+  if (stanza == NULL) {
+    console_->PrintLine("Room config failed.");
+  } else {
+    console_->PrintLine("Room config error: ", stanza->Str().c_str());
+  }
+}
+
 void CallClient::OnMucInviteReceived(const buzz::Jid& inviter,
     const buzz::Jid& room,
     const std::vector<buzz::AvailableMediaEntry>& avail) {
diff --git a/talk/examples/call/callclient.h b/talk/examples/call/callclient.h
index fb7878e..18296be 100644
--- a/talk/examples/call/callclient.h
+++ b/talk/examples/call/callclient.h
@@ -50,6 +50,8 @@
 class DiscoInfoQueryTask;
 class Muc;
 class Status;
+class IqTask;
+class MucRoomConfigTask;
 class MucRoomLookupTask;
 class MucStatus;
 class XmlElement;
@@ -174,8 +176,13 @@
   void OnSpeakerChanged(cricket::Call* call,
                         cricket::Session* session,
                         const cricket::NamedSource& speaker_source);
-  void OnRoomLookupResponse(const buzz::MucRoomInfo& room_info);
-  void OnRoomLookupError(const buzz::XmlElement* stanza);
+  void OnRoomLookupResponse(buzz::MucRoomLookupTask* task,
+                            const buzz::MucRoomInfo& room_info);
+  void OnRoomLookupError(buzz::IqTask* task,
+                         const buzz::XmlElement* stanza);
+  void OnRoomConfigResult(buzz::MucRoomConfigTask* task);
+  void OnRoomConfigError(buzz::IqTask* task,
+                         const buzz::XmlElement* stanza);
   buzz::Jid GenerateRandomMucJid();
 
   void AddStaticRenderedView(
diff --git a/talk/libjingle.scons b/talk/libjingle.scons
index 03239d4..e053a8f 100644
--- a/talk/libjingle.scons
+++ b/talk/libjingle.scons
@@ -20,6 +20,18 @@
                "HAVE_EXPAT_CONFIG_H",
              ],
 )
+talk.Library(env, name = "gunit",
+             srcs = [
+               "third_party/gtest/src/gtest-all.cc",
+             ],
+             includedirs = [
+               "third_party/gtest/include",
+               "third_party/gtest",
+             ],
+             cppdefines = [
+               "LIBJINGLE_UNITTEST",
+             ],
+)
 talk.Library(env, name = "srtp",
              srcs = [
                "third_party/srtp/crypto/cipher/aes.c",
@@ -63,6 +75,8 @@
                "sound/alsasoundsystem.cc",
                "sound/alsasymboltable.cc",
                "sound/linuxsoundsystem.cc",
+               "sound/nullsoundsystem.cc",
+               "sound/nullsoundsystemfactory.cc",
                "sound/pulseaudiosoundsystem.cc",
                "sound/pulseaudiosymboltable.cc",
                "sound/platformsoundsystem.cc",
@@ -212,6 +226,7 @@
                "xmpp/constants.cc",
                "xmpp/iqtask.cc",
                "xmpp/jid.cc",
+               "xmpp/mucroomconfigtask.cc",
                "xmpp/mucroomlookuptask.cc",
                "xmpp/ratelimitmanager.cc",
                "xmpp/saslmechanism.cc",
@@ -262,6 +277,21 @@
                "gtk+-2.0",
              ],
 )
+talk.Library(env, name = "unittest_main",
+             libs = [
+               "gunit",
+             ],
+             srcs = [
+               "base/unittest_main.cc",
+             ],
+             includedirs = [
+               "third_party/gtest/include",
+               "third_party/gtest",
+             ],
+             cppdefines = [
+               "LIBJINGLE_UNITTEST",
+             ],
+)
 talk.App(env, name = "login",
          libs = [
            "jingle",
@@ -343,3 +373,18 @@
            "p2p/base/stunserver_main.cc",
          ],
 )
+talk.Unittest(env, name = "network",
+              libs = [
+                "jingle",
+              ],
+              srcs = [
+                "base/network_unittest.cc",
+              ],
+              includedirs = [
+                "third_party/gtest/include",
+                "third_party/gtest",
+              ],
+              cppdefines = [
+                "LIBJINGLE_UNITTEST",
+              ],
+)
diff --git a/talk/session/phone/channelmanager.cc b/talk/session/phone/channelmanager.cc
index 0d11b53..97419cb 100644
--- a/talk/session/phone/channelmanager.cc
+++ b/talk/session/phone/channelmanager.cc
@@ -654,6 +654,8 @@
 
 void ChannelManager::OnVideoCaptureResult(VideoCapturer* capturer,
                                           CaptureResult result) {
+  // TODO: Check capturer and signal failure only for camera video, not
+  // screencast.
   capturing_ = result == CR_SUCCESS;
   main_thread_->Post(this, MSG_CAMERASTARTED,
                      new talk_base::TypedMessageData<CaptureResult>(result));
diff --git a/talk/session/phone/mediacommon.h b/talk/session/phone/mediacommon.h
new file mode 100644
index 0000000..7a606e3
--- /dev/null
+++ b/talk/session/phone/mediacommon.h
@@ -0,0 +1,42 @@
+/*
+ * libjingle
+ * Copyright 2004--2007, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_PHONE_MEDIACOMMON_H_
+#define TALK_SESSION_PHONE_MEDIACOMMON_H_
+
+namespace cricket {
+
+enum MediaCapabilities {
+  AUDIO_RECV = 1 << 0,
+  AUDIO_SEND = 1 << 1,
+  VIDEO_RECV = 1 << 2,
+  VIDEO_SEND = 1 << 3,
+};
+
+}  // namespace cricket
+
+#endif  // TALK_SESSION_PHONE_MEDIACOMMON_H_
diff --git a/talk/session/phone/webrtccommon.h b/talk/session/phone/webrtccommon.h
new file mode 100644
index 0000000..60fee27
--- /dev/null
+++ b/talk/session/phone/webrtccommon.h
@@ -0,0 +1,84 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef TALK_SESSION_PHONE_WEBRTCCOMMON_H_
+#define TALK_SESSION_PHONE_WEBRTCCOMMON_H_
+
+#ifdef WEBRTC_RELATIVE_PATH
+#include "common_types.h"
+#include "video_engine/main/interface/vie_base.h"
+#include "voice_engine/main/interface/voe_base.h"
+#else
+#include "third_party/webrtc/files/include/common_types.h"
+#include "third_party/webrtc/files/include/voe_base.h"
+#include "third_party/webrtc/files/include/vie_base.h"
+#endif  // WEBRTC_RELATIVE_PATH
+
+namespace cricket {
+
+// Tracing helpers, for easy logging when WebRTC calls fail.
+// Example: "LOG_RTCERR1(StartSend, channel);" produces the trace
+//          "StartSend(1) failed, err=XXXX"
+// The method GetLastEngineError must be defined in the calling scope.
+#define LOG_RTCERR0(func) \
+    LOG_RTCERR0_EX(func, GetLastEngineError())
+#define LOG_RTCERR1(func, a1) \
+    LOG_RTCERR1_EX(func, a1, GetLastEngineError())
+#define LOG_RTCERR2(func, a1, a2) \
+    LOG_RTCERR2_EX(func, a1, a2, GetLastEngineError())
+#define LOG_RTCERR3(func, a1, a2, a3) \
+    LOG_RTCERR3_EX(func, a1, a2, a3, GetLastEngineError())
+#define LOG_RTCERR4(func, a1, a2, a3, a4) \
+    LOG_RTCERR4_EX(func, a1, a2, a3, a4, GetLastEngineError())
+#define LOG_RTCERR5(func, a1, a2, a3, a4, a5) \
+    LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, GetLastEngineError())
+#define LOG_RTCERR6(func, a1, a2, a3, a4, a5, a6) \
+    LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, GetLastEngineError())
+#define LOG_RTCERR0_EX(func, err) LOG(LS_WARNING) \
+    << "" << #func << "() failed, err=" << err
+#define LOG_RTCERR1_EX(func, a1, err) LOG(LS_WARNING) \
+    << "" << #func << "(" << a1 << ") failed, err=" << err
+#define LOG_RTCERR2_EX(func, a1, a2, err) LOG(LS_WARNING) \
+    << "" << #func << "(" << a1 << ", " << a2 << ") failed, err=" \
+    << err
+#define LOG_RTCERR3_EX(func, a1, a2, a3, err) LOG(LS_WARNING) \
+    << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+    << ") failed, err=" << err
+#define LOG_RTCERR4_EX(func, a1, a2, a3, a4, err) LOG(LS_WARNING) \
+    << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+    << ", " << a4 << ") failed, err=" << err
+#define LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, err) LOG(LS_WARNING) \
+    << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+    << ", " << a4 << ", " << a5 << ") failed, err=" << err
+#define LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, err) LOG(LS_WARNING) \
+    << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+    << ", " << a4 << ", " << a5 << ", " << a6 << ") failed, err=" << err
+
+}  // namespace cricket
+
+#endif  // TALK_SESSION_PHONE_WEBRTCCOMMON_H_
diff --git a/talk/session/phone/webrtcpassthroughrender.cc b/talk/session/phone/webrtcpassthroughrender.cc
new file mode 100644
index 0000000..cecaa42
--- /dev/null
+++ b/talk/session/phone/webrtcpassthroughrender.cc
@@ -0,0 +1,135 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/phone/webrtcpassthroughrender.h"
+
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+
+namespace cricket {
+
+class PassthroughStream: public webrtc::VideoRenderCallback {
+ public:
+  explicit PassthroughStream(const WebRtc_UWord32 stream_id)
+      : stream_id_(stream_id) {
+  }
+  virtual ~PassthroughStream() {
+  }
+  virtual WebRtc_Word32 RenderFrame(const WebRtc_UWord32 stream_id,
+                                    webrtc::VideoFrame& videoFrame) {
+    talk_base::CritScope cs(&stream_critical_);
+    // Send frame for rendering directly
+    if (renderer_) {
+      renderer_->RenderFrame(stream_id, videoFrame);
+    }
+    return 0;
+  }
+  WebRtc_Word32 SetRenderer(VideoRenderCallback* renderer) {
+    talk_base::CritScope cs(&stream_critical_);
+    renderer_ = renderer;
+    return 0;
+  }
+
+ private:
+  WebRtc_UWord32 stream_id_;
+  VideoRenderCallback* renderer_;
+  talk_base::CriticalSection stream_critical_;
+};
+
+WebRtcPassthroughRender::WebRtcPassthroughRender()
+    : window_(NULL) {
+}
+
+WebRtcPassthroughRender::~WebRtcPassthroughRender() {
+  while (!stream_render_map_.empty()) {
+    PassthroughStream* stream = stream_render_map_.begin()->second;
+    stream_render_map_.erase(stream_render_map_.begin());
+    delete stream;
+  }
+}
+
+webrtc::VideoRenderCallback* WebRtcPassthroughRender::AddIncomingRenderStream(
+    const WebRtc_UWord32 stream_id,
+    const WebRtc_UWord32 zOrder,
+    const float left, const float top,
+    const float right, const float bottom) {
+  talk_base::CritScope cs(&render_critical_);
+  StreamMap::iterator it;
+  it = stream_render_map_.find(stream_id);
+  if (it != stream_render_map_.end())
+    return NULL;
+
+  PassthroughStream* stream = new PassthroughStream(stream_id);
+  // Store the stream
+  stream_render_map_[stream_id] = stream;
+  return stream;
+}
+
+WebRtc_Word32 WebRtcPassthroughRender::DeleteIncomingRenderStream(
+    const WebRtc_UWord32 stream_id) {
+  talk_base::CritScope cs(&render_critical_);
+  StreamMap::iterator it;
+  it = stream_render_map_.find(stream_id);
+  if (it == stream_render_map_.end()) {
+    LOG(LS_WARNING) << "DeleteIncomingRenderStream failed to find stream_id: "
+                    << stream_id;
+    return -1;
+  }
+  PassthroughStream* stream = it->second;
+  delete stream;
+  stream_render_map_.erase(it);
+  return 0;
+}
+
+WebRtc_Word32 WebRtcPassthroughRender::AddExternalRenderCallback(
+    const WebRtc_UWord32 stream_id,
+    webrtc::VideoRenderCallback* render_object) {
+  talk_base::CritScope cs(&render_critical_);
+  StreamMap::iterator it;
+  it = stream_render_map_.find(stream_id);
+  if (it == stream_render_map_.end()) {
+    LOG(LS_WARNING) << "AddExternalRenderCallback failed to find stream_id: "
+                    << stream_id;
+    return -1;
+  }
+  PassthroughStream* stream = it->second;
+  ASSERT(stream != NULL);
+  return stream->SetRenderer(render_object);
+}
+
+bool WebRtcPassthroughRender::HasIncomingRenderStream(
+    const WebRtc_UWord32 stream_id) const {
+  StreamMap::const_iterator it;
+  it = stream_render_map_.find(stream_id);
+  return (it != stream_render_map_.end());
+}
+
+webrtc::RawVideoType WebRtcPassthroughRender::PreferredVideoType() const {
+  return webrtc::kVideoI420;
+}
+
+}  // namespace cricket
diff --git a/talk/session/phone/webrtcpassthroughrender.h b/talk/session/phone/webrtcpassthroughrender.h
new file mode 100644
index 0000000..8d2ae90
--- /dev/null
+++ b/talk/session/phone/webrtcpassthroughrender.h
@@ -0,0 +1,211 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_PHONE_WEBRTCPASSTHROUGHRENDER_H_
+#define TALK_SESSION_PHONE_WEBRTCPASSTHROUGHRENDER_H_
+
+#include <map>
+
+#ifdef WEBRTC_RELATIVE_PATH
+#include "modules/video_render/main/interface/video_render.h"
+#else
+#include "third_party/webrtc/files/include/video_render.h"
+#endif
+#include "talk/base/criticalsection.h"
+
+namespace cricket {
+class PassthroughStream;
+
+class WebRtcPassthroughRender : public webrtc::VideoRender {
+ public:
+  WebRtcPassthroughRender();
+  virtual ~WebRtcPassthroughRender();
+
+  virtual WebRtc_Word32 Version(WebRtc_Word8* version,
+      WebRtc_UWord32& remainingBufferInBytes,
+      WebRtc_UWord32& position) const {
+    return 0;
+  }
+
+  virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id) {
+    return 0;
+  }
+
+  virtual WebRtc_Word32 TimeUntilNextProcess() { return 0; }
+
+  virtual WebRtc_Word32 Process() { return 0; }
+
+  virtual void* Window() {
+    talk_base::CritScope cs(&render_critical_);
+    return window_;
+  }
+
+  virtual WebRtc_Word32 ChangeWindow(void* window) {
+    talk_base::CritScope cs(&render_critical_);
+    window_ = window;
+    return 0;
+  }
+
+  virtual webrtc::VideoRenderCallback* AddIncomingRenderStream(
+      const WebRtc_UWord32 stream_id,
+      const WebRtc_UWord32 zOrder,
+      const float left, const float top,
+      const float right, const float bottom);
+
+  virtual WebRtc_Word32 DeleteIncomingRenderStream(
+      const WebRtc_UWord32 stream_id);
+
+  virtual WebRtc_Word32 AddExternalRenderCallback(
+      const WebRtc_UWord32 stream_id,
+      webrtc::VideoRenderCallback* render_object);
+
+  virtual WebRtc_Word32 GetIncomingRenderStreamProperties(
+      const WebRtc_UWord32 stream_id,
+      WebRtc_UWord32& zOrder,
+      float& left, float& top,
+      float& right, float& bottom) const {
+    return -1;
+  }
+
+  virtual WebRtc_UWord32 GetIncomingFrameRate(
+      const WebRtc_UWord32 stream_id) {
+    return 0;
+  }
+
+  virtual WebRtc_UWord32 GetNumIncomingRenderStreams() const {
+    return stream_render_map_.size();
+  }
+
+  virtual bool HasIncomingRenderStream(const WebRtc_UWord32 stream_id) const;
+
+  virtual WebRtc_Word32 RegisterRawFrameCallback(
+      const WebRtc_UWord32 stream_id,
+      webrtc::VideoRenderCallback* callback_obj) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 GetLastRenderedFrame(
+      const WebRtc_UWord32 stream_id,
+      webrtc::VideoFrame &frame) const {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 StartRender(
+      const WebRtc_UWord32 stream_id) { return 0; }
+
+  virtual WebRtc_Word32 StopRender(
+      const WebRtc_UWord32 stream_id) { return 0; }
+
+  virtual WebRtc_Word32 ResetRender() { return 0; }
+
+  virtual webrtc::RawVideoType PreferredVideoType() const;
+
+  virtual bool IsFullScreen() { return false; }
+
+  virtual WebRtc_Word32 GetScreenResolution(
+      WebRtc_UWord32& screenWidth,
+      WebRtc_UWord32& screenHeight) const {
+    return -1;
+  }
+
+  virtual WebRtc_UWord32 RenderFrameRate(
+      const WebRtc_UWord32 stream_id) {
+    return 0;
+  }
+
+  virtual WebRtc_Word32 SetStreamCropping(
+      const WebRtc_UWord32 stream_id,
+      const float left, const float top,
+      const float right,
+      const float bottom) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 ConfigureRenderer(
+      const WebRtc_UWord32 stream_id,
+      const unsigned int zOrder,
+      const float left, const float top,
+      const float right,
+      const float bottom) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 SetTransparentBackground(const bool enable) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 FullScreenRender(void* window, const bool enable) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 SetBitmap(const void* bitMap,
+      const WebRtc_UWord8 pictureId, const void* colorKey,
+      const float left, const float top,
+      const float right, const float bottom) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 SetText(const WebRtc_UWord8 textId,
+      const WebRtc_UWord8* text,
+      const WebRtc_Word32 textLength,
+      const WebRtc_UWord32 textColorRef,
+      const WebRtc_UWord32 backgroundColorRef,
+      const float left, const float top,
+      const float right, const float bottom) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 SetStartImage(
+      const WebRtc_UWord32 stream_id,
+      const webrtc::VideoFrame& videoFrame) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 SetTimeoutImage(
+      const WebRtc_UWord32 stream_id,
+      const webrtc::VideoFrame& videoFrame,
+      const WebRtc_UWord32 timeout) {
+    return -1;
+  }
+
+  virtual WebRtc_Word32 MirrorRenderStream(const int renderId,
+                                           const bool enable,
+                                           const bool mirrorXAxis,
+                                           const bool mirrorYAxis) {
+    return -1;
+  }
+
+ private:
+  typedef std::map<WebRtc_UWord32, PassthroughStream*> StreamMap;
+
+  void* window_;
+  StreamMap stream_render_map_;
+  talk_base::CriticalSection render_critical_;
+};
+}  // namespace cricket
+
+#endif  // TALK_SESSION_PHONE_WEBRTCPASSTHROUGHRENDER_H_
diff --git a/talk/session/phone/webrtcpassthroughrender_unittest.cc b/talk/session/phone/webrtcpassthroughrender_unittest.cc
new file mode 100644
index 0000000..b33d38b
--- /dev/null
+++ b/talk/session/phone/webrtcpassthroughrender_unittest.cc
@@ -0,0 +1,117 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Author: Ronghua Wu (ronghuawu@google.com)
+
+#include <string>
+
+#include "talk/base/gunit.h"
+#include "talk/session/phone/webrtcpassthroughrender.h"
+#include "talk/session/phone/testutils.h"
+
+class WebRtcPassthroughRenderTest : public testing::Test {
+ public:
+  class ExternalRenderer : public webrtc::VideoRenderCallback {
+   public:
+    ExternalRenderer() : frame_num_(0) {
+    }
+
+    virtual ~ExternalRenderer() {
+    }
+
+    virtual WebRtc_Word32 RenderFrame(
+        const WebRtc_UWord32 stream_id,
+        webrtc::VideoFrame& videoFrame) {
+      ++frame_num_;
+      LOG(INFO) << "RenderFrame stream_id: " << stream_id
+                << " frame_num: " << frame_num_;
+      return 0;
+    }
+
+    int frame_num() const {
+      return frame_num_;
+    }
+
+   private:
+    int frame_num_;
+  };
+
+  WebRtcPassthroughRenderTest()
+      : renderer_(new cricket::WebRtcPassthroughRender()) {
+  }
+
+  ~WebRtcPassthroughRenderTest() {
+  }
+
+  webrtc::VideoRenderCallback* AddIncomingRenderStream(int stream_id) {
+    return renderer_->AddIncomingRenderStream(stream_id, 0, 0, 0, 0, 0);
+  }
+
+  bool HasIncomingRenderStream(int stream_id) {
+    return renderer_->HasIncomingRenderStream(stream_id);
+  }
+
+  bool DeleteIncomingRenderStream(int stream_id) {
+    return (renderer_->DeleteIncomingRenderStream(stream_id) == 0);
+  }
+
+  bool AddExternalRenderCallback(int stream_id,
+                                 webrtc::VideoRenderCallback* renderer) {
+    return (renderer_->AddExternalRenderCallback(stream_id, renderer) == 0);
+  }
+
+ private:
+  talk_base::scoped_ptr<cricket::WebRtcPassthroughRender> renderer_;
+};
+
+TEST_F(WebRtcPassthroughRenderTest, Streams) {
+  const int stream_id1 = 1234;
+  const int stream_id2 = 5678;
+  webrtc::VideoRenderCallback* stream = NULL;
+  // Add a new stream
+  stream = AddIncomingRenderStream(stream_id1);
+  EXPECT_TRUE(stream != NULL);
+  EXPECT_TRUE(HasIncomingRenderStream(stream_id1));
+  // Trying to add an already existing stream should return NULL
+  stream =AddIncomingRenderStream(stream_id1);
+  EXPECT_TRUE(stream == NULL);
+  stream = AddIncomingRenderStream(stream_id2);
+  EXPECT_TRUE(stream != NULL);
+  EXPECT_TRUE(HasIncomingRenderStream(stream_id2));
+  // Remove the stream
+  EXPECT_TRUE(DeleteIncomingRenderStream(stream_id2));
+  EXPECT_TRUE(!HasIncomingRenderStream(stream_id2));
+  // Add back the removed stream
+  stream = AddIncomingRenderStream(stream_id2);
+  EXPECT_TRUE(stream != NULL);
+  EXPECT_TRUE(HasIncomingRenderStream(stream_id2));
+}
+
+TEST_F(WebRtcPassthroughRenderTest, Renderer) {
+  webrtc::VideoFrame frame;
+  const int stream_id1 = 1234;
+  const int stream_id2 = 5678;
+  webrtc::VideoRenderCallback* stream1 = NULL;
+  webrtc::VideoRenderCallback* stream2 = NULL;
+  // Add two new streams
+  stream1 = AddIncomingRenderStream(stream_id1);
+  EXPECT_TRUE(stream1 != NULL);
+  EXPECT_TRUE(HasIncomingRenderStream(stream_id1));
+  stream2 = AddIncomingRenderStream(stream_id2);
+  EXPECT_TRUE(stream2 != NULL);
+  EXPECT_TRUE(HasIncomingRenderStream(stream_id2));
+  // Register the external renderer
+  WebRtcPassthroughRenderTest::ExternalRenderer renderer1;
+  WebRtcPassthroughRenderTest::ExternalRenderer renderer2;
+  AddExternalRenderCallback(stream_id1, &renderer1);
+  AddExternalRenderCallback(stream_id2, &renderer2);
+  int test_frame_num = 10;
+  for (int i = 0; i < test_frame_num; ++i) {
+    stream1->RenderFrame(stream_id1, frame);
+  }
+  EXPECT_EQ(test_frame_num, renderer1.frame_num());
+  test_frame_num = 30;
+  for (int i = 0; i < test_frame_num; ++i) {
+    stream2->RenderFrame(stream_id2, frame);
+  }
+  EXPECT_EQ(test_frame_num, renderer2.frame_num());
+}
diff --git a/talk/session/phone/webrtcvideoengine.cc b/talk/session/phone/webrtcvideoengine.cc
new file mode 100644
index 0000000..6f9ff6a
--- /dev/null
+++ b/talk/session/phone/webrtcvideoengine.cc
@@ -0,0 +1,1003 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_WEBRTC_VIDEO
+
+#include "talk/session/phone/webrtcvideoengine.h"
+
+#include "talk/base/common.h"
+#include "talk/base/buffer.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringutils.h"
+#include "talk/session/phone/videorenderer.h"
+#include "talk/session/phone/webrtcpassthroughrender.h"
+#include "talk/session/phone/webrtcvoiceengine.h"
+#include "talk/session/phone/webrtcvideoframe.h"
+#include "talk/session/phone/webrtcvie.h"
+#include "talk/session/phone/webrtcvoe.h"
+
+namespace cricket {
+
+static const int kDefaultLogSeverity = talk_base::LS_WARNING;
+static const int kStartVideoBitrate = 300;
+static const int kMaxVideoBitrate = 1000;
+
+class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
+ public:
+  explicit WebRtcRenderAdapter(VideoRenderer* renderer)
+      : renderer_(renderer) {
+  }
+
+  virtual int FrameSizeChange(unsigned int width, unsigned int height,
+                              unsigned int /*number_of_streams*/) {
+    if (renderer_ == NULL)
+      return 0;
+    width_ = width;
+    height_ = height;
+    return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
+  }
+
+  virtual int DeliverFrame(unsigned char* buffer, int buffer_size) {
+    if (renderer_ == NULL)
+      return 0;
+    WebRtcVideoFrame video_frame;
+    // TODO: Currently by the time DeliverFrame gets called,
+    // ViE expects the frame will be rendered ASAP. However, the libjingle
+    // renderer may have its own internal delays. Can we disable the buffering
+    // inside ViE and surface the timing information to this callback?
+    video_frame.Attach(buffer, buffer_size, width_, height_, 0, 0);
+    int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
+    uint8* buffer_temp;
+    size_t buffer_size_temp;
+    video_frame.Detach(&buffer_temp, &buffer_size_temp);
+    return ret;
+  }
+
+  virtual ~WebRtcRenderAdapter() {}
+
+ private:
+  VideoRenderer* renderer_;
+  unsigned int width_;
+  unsigned int height_;
+};
+
+const WebRtcVideoEngine::VideoCodecPref
+    WebRtcVideoEngine::kVideoCodecPrefs[] = {
+    {"VP8", 120, 0},
+};
+
+// The formats are sorted by the descending order of width. We use the order to
+// find the next format for CPU and bandwidth adaptation.
+const VideoFormat WebRtcVideoEngine::kVideoFormats[] = {
+  // TODO: Understand why we have problem with 16:9 formats.
+  VideoFormat(1280, 800, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+//VideoFormat(1280, 720, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+  VideoFormat(960, 600, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+//VideoFormat(960, 540, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+  VideoFormat(640, 400, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+//VideoFormat(640, 360, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+  VideoFormat(480, 300, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+//VideoFormat(480, 270, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+  VideoFormat(320, 200, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+//VideoFormat(320, 180, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+  VideoFormat(240, 150, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+//VideoFormat(240, 135, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+  VideoFormat(160, 100, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+//VideoFormat(160, 90, VideoFormat::FpsToInterval(30), FOURCC_ANY),
+};
+
+// TODO: Understand why 640x400 is not working.
+const VideoFormat WebRtcVideoEngine::kDefaultVideoFormat =
+    VideoFormat(320, 200, VideoFormat::FpsToInterval(30), FOURCC_ANY);
+
+WebRtcVideoEngine::WebRtcVideoEngine()
+    : vie_wrapper_(new ViEWrapper()),
+      capture_module_(NULL),
+      external_capture_(false),
+      render_module_(new WebRtcPassthroughRender()),
+      voice_engine_(NULL) {
+  Construct();
+}
+
+WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
+                                     webrtc::VideoCaptureModule* capture)
+    : vie_wrapper_(new ViEWrapper()),
+      capture_module_(capture),
+      external_capture_(true),
+      render_module_(webrtc::VideoRender::CreateVideoRender(0, NULL, false,
+          webrtc::kRenderExternal)),
+      voice_engine_(voice_engine) {
+  Construct();
+}
+
+WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
+                                     ViEWrapper* vie_wrapper)
+    : vie_wrapper_(vie_wrapper),
+      capture_module_(NULL),
+      external_capture_(false),
+      render_module_(new WebRtcPassthroughRender()),
+      voice_engine_(voice_engine) {
+  Construct();
+}
+
+void  WebRtcVideoEngine::Construct() {
+  capture_id_ = -1;
+  log_level_ = kDefaultLogSeverity;
+  capture_started_ = false;
+
+  ApplyLogging();
+  if (vie_wrapper_->engine()->SetTraceCallback(this) != 0) {
+    LOG_RTCERR1(SetTraceCallback, this);
+  }
+
+  // Set default quality levels for our supported codecs.  We override them here
+  // if we know your cpu performance is low, and they can be updated explicitly
+  // by calling SetDefaultCodec.  For example by a flute preference setting, or
+  // by the server with a jec in response to our reported system info.
+  VideoCodec max_codec(kVideoCodecPrefs[0].payload_type,
+                       kVideoCodecPrefs[0].name,
+                       kDefaultVideoFormat.width,
+                       kDefaultVideoFormat.height,
+                       kDefaultVideoFormat.framerate(), 0);
+  if (!SetDefaultCodec(max_codec)) {
+    LOG(LS_ERROR) << "Failed to initialize list of supported codec types";
+  }
+}
+
+WebRtcVideoEngine::~WebRtcVideoEngine() {
+  LOG(LS_INFO) << " WebRtcVideoEngine::~WebRtcVideoEngine";
+  vie_wrapper_->engine()->SetTraceCallback(NULL);
+  Terminate();
+  vie_wrapper_.reset();
+  if (capture_module_) {
+    webrtc::VideoCaptureModule::Destroy(capture_module_);
+  }
+}
+
+bool WebRtcVideoEngine::Init() {
+  LOG(LS_INFO) << "WebRtcVideoEngine::Init";
+  bool result = InitVideoEngine();
+  if (result) {
+    LOG(LS_INFO) << "VideoEngine Init done";
+  } else {
+    LOG(LS_ERROR) << "VideoEngine Init failed, releasing";
+    Terminate();
+  }
+  return result;
+}
+
+bool WebRtcVideoEngine::InitVideoEngine() {
+  LOG(LS_INFO) << "WebRtcVideoEngine::InitVideoEngine";
+
+  if (vie_wrapper_->base()->Init() != 0) {
+    LOG_RTCERR0(Init);
+    return false;
+  }
+
+  if (!voice_engine_) {
+    LOG(LS_WARNING) << "NULL voice engine";
+  } else if ((vie_wrapper_->base()->SetVoiceEngine(
+      voice_engine_->voe()->engine())) != 0) {
+    LOG_RTCERR0(SetVoiceEngine);
+    return false;
+  }
+
+  if ((vie_wrapper_->base()->RegisterObserver(*this)) != 0) {
+    LOG_RTCERR0(RegisterObserver);
+    return false;
+  }
+
+  if (vie_wrapper_->render()->RegisterVideoRenderModule(
+      *render_module_.get()) != 0) {
+    LOG_RTCERR0(RegisterVideoRenderModule);
+    return false;
+  }
+
+  std::sort(video_codecs_.begin(), video_codecs_.end(),
+            &VideoCodec::Preferable);
+  return true;
+}
+
+void WebRtcVideoEngine::PerformanceAlarm(const unsigned int cpu_load) {
+  LOG(LS_INFO) << "WebRtcVideoEngine::PerformanceAlarm";
+}
+
+// Ignore spammy trace messages, mostly from the stats API when we haven't
+// gotten RTCP info yet from the remote side.
+static bool ShouldIgnoreTrace(const std::string& trace) {
+  static const char* kTracesToIgnore[] = {
+    "\tfailed to GetReportBlockInformation",
+    NULL
+  };
+  for (const char* const* p = kTracesToIgnore; *p; ++p) {
+    if (trace.find(*p) == 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void WebRtcVideoEngine::Print(const webrtc::TraceLevel level,
+                              const char* trace, const int length) {
+  talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
+  if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
+    sev = talk_base::LS_ERROR;
+  else if (level == webrtc::kTraceWarning)
+    sev = talk_base::LS_WARNING;
+  else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
+    sev = talk_base::LS_INFO;
+
+  if (sev >= log_level_) {
+    // Skip past boilerplate prefix text
+    if (length < 72) {
+      std::string msg(trace, length);
+      LOG(LS_ERROR) << "Malformed webrtc log message: ";
+      LOG_V(sev) << msg;
+    } else {
+      std::string msg(trace + 71, length - 72);
+      if (!ShouldIgnoreTrace(msg)) {
+        LOG_V(sev) << "WebRtc ViE:" << msg;
+      }
+    }
+  }
+}
+
+void WebRtcVideoEngine::ApplyLogging() {
+  int filter = 0;
+  switch (log_level_) {
+    case talk_base::LS_VERBOSE: filter |= webrtc::kTraceAll;
+    case talk_base::LS_INFO: filter |= webrtc::kTraceStateInfo;
+    case talk_base::LS_WARNING: filter |= webrtc::kTraceWarning;
+    case talk_base::LS_ERROR: filter |=
+        webrtc::kTraceError | webrtc::kTraceCritical;
+  }
+}
+
+// Rebuilds the codec list to be only those that are less intensive
+// than the specified codec.
+bool WebRtcVideoEngine::RebuildCodecList(const VideoCodec& in_codec) {
+  if (!FindCodec(in_codec))
+    return false;
+
+  video_codecs_.clear();
+
+  bool found = false;
+  for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
+    const VideoCodecPref& pref(kVideoCodecPrefs[i]);
+    if (!found)
+      found = (in_codec.name == pref.name);
+    if (found) {
+      VideoCodec codec(pref.payload_type, pref.name,
+                       in_codec.width, in_codec.height, in_codec.framerate,
+                       ARRAY_SIZE(kVideoCodecPrefs) - i);
+      video_codecs_.push_back(codec);
+    }
+  }
+  ASSERT(found);
+  return true;
+}
+
// Shuts the engine down: stops capture and the local renderer, detaches the
// render module, and clears the base observer, voice engine link and trace
// callback. Every error is logged but ignored so teardown always runs to
// completion.
void WebRtcVideoEngine::Terminate() {
  LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
  SetCapture(false);
  if (local_renderer_.get()) {
    // If the renderer already set, stop it first
    if (vie_wrapper_->render()->StopRender(capture_id_) != 0)
      LOG_RTCERR1(StopRender, capture_id_);
  }

  if (vie_wrapper_->render()->DeRegisterVideoRenderModule(
      *render_module_.get()) != 0)
    LOG_RTCERR0(DeRegisterVideoRenderModule);

  if ((vie_wrapper_->base()->DeregisterObserver()) != 0)
    LOG_RTCERR0(DeregisterObserver);

  if ((vie_wrapper_->base()->SetVoiceEngine(NULL)) != 0)
    LOG_RTCERR0(SetVoiceEngine);

  if (vie_wrapper_->engine()->SetTraceCallback(NULL) != 0)
    LOG_RTCERR0(SetTraceCallback);
}
+
+int WebRtcVideoEngine::GetCapabilities() {
+  return VIDEO_RECV | VIDEO_SEND;
+}
+
// No engine-level options are supported yet; all values are accepted and
// ignored.
bool WebRtcVideoEngine::SetOptions(int options) {
  return true;
}
+
+bool WebRtcVideoEngine::ReleaseCaptureDevice() {
+  if (capture_id_ != -1) {
+    // Stop capture
+    SetCapture(false);
+    // DisconnectCaptureDevice
+    WebRtcVideoMediaChannel* channel;
+    for (VideoChannels::const_iterator it = channels_.begin();
+        it != channels_.end(); ++it) {
+      ASSERT(*it != NULL);
+      channel = *it;
+      // Ignore the return value here as the channel may not have connected to
+      // the capturer yet.
+      vie_wrapper_->capture()->DisconnectCaptureDevice(
+          channel->video_channel());
+      channel->set_connected(false);
+    }
+    // ReleaseCaptureDevice
+    vie_wrapper_->capture()->ReleaseCaptureDevice(capture_id_);
+    capture_id_ = -1;
+  }
+
+  return true;
+}
+
+bool WebRtcVideoEngine::SetCaptureDevice(const Device* cam) {
+  ASSERT(cam != NULL);
+
+  ReleaseCaptureDevice();
+
+  webrtc::ViECapture* vie_capture = vie_wrapper_->capture();
+
+  // There's an external VCM
+  if (capture_module_) {
+    if (vie_capture->AllocateCaptureDevice(*capture_module_, capture_id_) != 0)
+      ASSERT(capture_id_ == -1);
+  } else if (!external_capture_) {
+    char device_name[256], device_id[256];
+    bool found = false;
+    for (int i = 0; i < vie_capture->NumberOfCaptureDevices(); ++i) {
+      if (vie_capture->GetCaptureDevice(i, device_name, sizeof(device_name),
+                                        device_id, sizeof(device_id)) == 0) {
+        // TODO: We should only compare the device_id here,
+        // however the devicemanager and webrtc use different format for th v4l2
+        // device id. So here we also compare the device_name for now.
+        // For example "usb-0000:00:1d.7-6" vs "/dev/video0".
+        if (cam->name.compare(device_name) == 0 ||
+            cam->id.compare(device_id) == 0) {
+          LOG(INFO) << "Found video capture device: " << device_name;
+          found = true;
+          break;
+        }
+      }
+    }
+    if (!found) {
+      return false;
+    }
+    if (vie_capture->AllocateCaptureDevice(device_id, strlen(device_id),
+                                           capture_id_) != 0) {
+      ASSERT(capture_id_ == -1);
+    }
+  }
+
+  if (capture_id_ != -1) {
+    // Connect to all the channels if there is any.
+    WebRtcVideoMediaChannel* channel;
+    for (VideoChannels::const_iterator it = channels_.begin();
+         it != channels_.end(); ++it) {
+      ASSERT(*it != NULL);
+      channel = *it;
+
+      // No channel should have been connected yet.
+      // In case of switching device, all channel connections should have been
+      // disconnected in ReleaseCaptureDevice() first.
+      ASSERT(!channel->connected());
+
+      if (vie_capture->ConnectCaptureDevice(capture_id_,
+                                            channel->video_channel()) == 0) {
+        channel->set_connected(true);
+      }
+    }
+    SetCapture(true);
+  }
+
+  return (capture_id_ != -1);
+}
+
+bool WebRtcVideoEngine::SetCaptureModule(webrtc::VideoCaptureModule* vcm) {
+  ReleaseCaptureDevice();
+  if (capture_module_) {
+    webrtc::VideoCaptureModule::Destroy(capture_module_);
+  }
+  capture_module_ = vcm;
+  external_capture_ = true;
+  return true;
+}
+
+bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
+  if (local_renderer_.get()) {
+    // If the renderer already set, stop and remove it first
+    if (vie_wrapper_->render()->StopRender(capture_id_) != 0) {
+      LOG_RTCERR1(StopRender, capture_id_);
+    }
+    if (vie_wrapper_->render()->RemoveRenderer(capture_id_) != 0) {
+      LOG_RTCERR1(RemoveRenderer, capture_id_);
+    }
+  }
+  local_renderer_.reset(new WebRtcRenderAdapter(renderer));
+
+  int ret;
+  ret = vie_wrapper_->render()->AddRenderer(capture_id_,
+                                            webrtc::kVideoI420,
+                                            local_renderer_.get());
+  if (ret != 0)
+    return false;
+  ret = vie_wrapper_->render()->StartRender(capture_id_);
+  return (ret == 0);
+}
+
+CaptureResult WebRtcVideoEngine::SetCapture(bool capture) {
+  if ((capture_started_ != capture) && (capture_id_ != -1)) {
+    int ret;
+    if (capture)
+      ret = vie_wrapper_->capture()->StartCapture(capture_id_);
+    else
+      ret = vie_wrapper_->capture()->StopCapture(capture_id_);
+    if (ret != 0)
+      return CR_NO_DEVICE;
+    capture_started_ = capture;
+  }
+  return CR_SUCCESS;
+}
+
// Returns the current, preference-ordered list of supported video codecs.
const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
  return video_codecs_;
}

// Records the minimum severity and re-applies the trace configuration.
// NOTE(review): the |filter| argument is currently ignored — confirm
// whether per-module filtering was intended here.
void WebRtcVideoEngine::SetLogging(int min_sev, const char* filter) {
  log_level_ = min_sev;
  ApplyLogging();
}

// Returns the last error recorded by the ViE wrapper.
int WebRtcVideoEngine::GetLastEngineError() {
  return vie_wrapper_->error();
}

// Stores the encoder configuration used as the default going forward.
bool WebRtcVideoEngine::SetDefaultEncoderConfig(
    const VideoEncoderConfig& config) {
  default_encoder_config_ = config;
  return true;
}
+
+WebRtcVideoMediaChannel* WebRtcVideoEngine::CreateChannel(
+    VoiceMediaChannel* voice_channel) {
+  WebRtcVideoMediaChannel* channel =
+      new WebRtcVideoMediaChannel(this, voice_channel);
+  if (channel) {
+    if (!channel->Init()) {
+      delete channel;
+      channel = NULL;
+    }
+  }
+  return channel;
+}
+
+// Checks to see whether we comprehend and could receive a particular codec
+bool WebRtcVideoEngine::FindCodec(const VideoCodec& in) {
+  for (int i = 0; i < ARRAY_SIZE(kVideoFormats); ++i) {
+    const VideoFormat& fmt = kVideoFormats[i];
+    if (fmt.width == in.width && fmt.height == in.height) {
+      for (int j = 0; j < ARRAY_SIZE(kVideoCodecPrefs); ++j) {
+        VideoCodec codec(kVideoCodecPrefs[j].payload_type,
+                         kVideoCodecPrefs[j].name, 0, 0, 0, 0);
+        if (codec.Matches(in)) {
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+// SetDefaultCodec may be called while the capturer is running. For example, a
+// test call is started in a page with QVGA default codec, and then a real call
+// is started in another page with VGA default codec. This is the corner case
+// and happens only when a session is started. We ignore this case currently.
+bool WebRtcVideoEngine::SetDefaultCodec(const VideoCodec& codec) {
+  if (!RebuildCodecList(codec)) {
+    LOG(LS_WARNING) << "Failed to RebuildCodecList";
+    return false;
+  }
+  return true;
+}
+
+void WebRtcVideoEngine::ConvertToCricketVideoCodec(
+    const webrtc::VideoCodec& in_codec, VideoCodec& out_codec) {
+  out_codec.id = in_codec.plType;
+  out_codec.name = in_codec.plName;
+  out_codec.width = in_codec.width;
+  out_codec.height = in_codec.height;
+  out_codec.framerate = in_codec.maxFramerate;
+}
+
+bool WebRtcVideoEngine::ConvertFromCricketVideoCodec(
+    const VideoCodec& in_codec, webrtc::VideoCodec& out_codec) {
+  bool found = false;
+  int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
+  for (int i = 0; i < ncodecs; ++i) {
+    if ((vie_wrapper_->codec()->GetCodec(i, out_codec) == 0) &&
+        (strncmp(out_codec.plName,
+                 in_codec.name.c_str(),
+                 webrtc::kPayloadNameSize - 1) == 0)) {
+      found = true;
+      break;
+    }
+  }
+
+  if (!found) {
+    LOG(LS_ERROR) << "invalid codec type";
+    return false;
+  }
+
+  if (in_codec.id != 0)
+    out_codec.plType = in_codec.id;
+
+  if (in_codec.width != 0)
+    out_codec.width = in_codec.width;
+
+  if (in_codec.height != 0)
+    out_codec.height = in_codec.height;
+
+  if (in_codec.framerate != 0)
+    out_codec.maxFramerate = in_codec.framerate;
+
+  out_codec.maxBitrate = kMaxVideoBitrate;
+  out_codec.startBitrate = kStartVideoBitrate;
+  out_codec.minBitrate = kStartVideoBitrate;
+
+  return true;
+}
+
// Returns the last error code reported by the ViE base API.
int WebRtcVideoEngine::GetLastVideoEngineError() {
  return vie_wrapper_->base()->LastError();
}

// Adds |channel| to the engine's channel registry so capture-device changes
// can be propagated to it. Called from the channel's constructor.
void WebRtcVideoEngine::RegisterChannel(WebRtcVideoMediaChannel *channel) {
  channels_.push_back(channel);
}
+
+void WebRtcVideoEngine::UnregisterChannel(WebRtcVideoMediaChannel *channel) {
+  VideoChannels::iterator i = std::find(channels_.begin(),
+                                      channels_.end(),
+                                      channel);
+  if (i != channels_.end()) {
+    channels_.erase(i);
+  }
+}
+
+// WebRtcVideoMediaChannel
+
// Constructs a media channel bound to |engine|, optionally linked to a
// voice channel for A/V sync, and registers itself with the engine. The
// underlying ViE channel is created later in Init(); until then
// vie_channel_ stays -1.
WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
    WebRtcVideoEngine* engine, VoiceMediaChannel* channel)
    : engine_(engine),
      voice_channel_(channel),
      vie_channel_(-1),
      sending_(false),
      connected_(false),
      render_started_(false),
      send_codec_(NULL) {
  engine->RegisterChannel(this);
}
+
+bool WebRtcVideoMediaChannel::Init() {
+  bool ret = true;
+  if (engine_->video_engine()->base()->CreateChannel(vie_channel_) != 0) {
+    LOG_RTCERR1(CreateChannel, vie_channel_);
+    return false;
+  }
+
+  LOG(LS_INFO) << "WebRtcVideoMediaChannel::Init "
+               << "video_channel " << vie_channel_ << " created";
+
+  // connect audio channel
+  if (voice_channel_) {
+    WebRtcVoiceMediaChannel* channel =
+        static_cast<WebRtcVoiceMediaChannel*> (voice_channel_);
+    if (engine_->video_engine()->base()->ConnectAudioChannel(
+        vie_channel_, channel->voe_channel()) != 0) {
+      LOG(LS_WARNING) << "ViE ConnectAudioChannel failed"
+                   << "A/V not synchronized";
+      // Don't set ret to false;
+    }
+  }
+
+  // Register external transport
+  if (engine_->video_engine()->network()->RegisterSendTransport(
+      vie_channel_, *this) != 0) {
+    ret = false;
+  } else {
+    EnableRtcp();
+    EnablePLI();
+  }
+  return ret;
+}
+
// Tears the channel down in roughly the reverse order of Init(): stop and
// remove rendering, detach the send transport, unregister from the engine,
// then delete the ViE channel. Failures are logged but never abort teardown.
WebRtcVideoMediaChannel::~WebRtcVideoMediaChannel() {
  // Stop and remove the remote renderer.
  SetRender(false);
  if (engine()->video_engine()->render()->RemoveRenderer(vie_channel_)
      == -1) {
    LOG_RTCERR1(RemoveRenderer, vie_channel_);
  }

  // DeRegister external transport
  if (engine()->video_engine()->network()->DeregisterSendTransport(
      vie_channel_) == -1) {
    LOG_RTCERR1(DeregisterSendTransport, vie_channel_);
  }

  // Unregister RtcChannel with the engine.
  engine()->UnregisterChannel(this);

  // Delete VideoChannel
  if (engine()->video_engine()->base()->DeleteChannel(vie_channel_) == -1) {
    LOG_RTCERR1(DeleteChannel, vie_channel_);
  }
}
+
+bool WebRtcVideoMediaChannel::SetRecvCodecs(
+    const std::vector<VideoCodec>& codecs) {
+  bool ret = true;
+  for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
+      iter != codecs.end(); ++iter) {
+    if (engine()->FindCodec(*iter)) {
+      webrtc::VideoCodec wcodec;
+      if (engine()->ConvertFromCricketVideoCodec(*iter, wcodec)) {
+        if (engine()->video_engine()->codec()->SetReceiveCodec(
+            vie_channel_,  wcodec) != 0) {
+          LOG_RTCERR2(SetReceiveCodec, vie_channel_, wcodec.plName);
+          ret = false;
+        }
+      }
+    } else {
+      LOG(LS_INFO) << "Unknown codec" << iter->name;
+      ret = false;
+    }
+  }
+
+  // make channel ready to receive packets
+  if (ret) {
+    if (engine()->video_engine()->base()->StartReceive(vie_channel_) != 0) {
+      LOG_RTCERR1(StartReceive, vie_channel_);
+      ret = false;
+    }
+  }
+  return ret;
+}
+
+bool WebRtcVideoMediaChannel::SetSendCodecs(
+    const std::vector<VideoCodec>& codecs) {
+  if (sending_) {
+    LOG(LS_ERROR) << "channel is alredy sending";
+    return false;
+  }
+
+  // match with local video codec list
+  std::vector<webrtc::VideoCodec> send_codecs;
+  for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
+      iter != codecs.end(); ++iter) {
+    if (engine()->FindCodec(*iter)) {
+      webrtc::VideoCodec wcodec;
+      if (engine()->ConvertFromCricketVideoCodec(*iter, wcodec))
+        send_codecs.push_back(wcodec);
+    }
+  }
+
+  // if none matches, return with set
+  if (send_codecs.empty()) {
+    LOG(LS_ERROR) << "No matching codecs avilable";
+    return false;
+  }
+
+  // select the first matched codec
+  const webrtc::VideoCodec& codec(send_codecs[0]);
+  send_codec_.reset(new webrtc::VideoCodec(codec));
+  if (engine()->video_engine()->codec()->SetSendCodec(
+      vie_channel_, codec) != 0) {
+    LOG_RTCERR2(SetSendCodec, vie_channel_, codec.plName);
+    return false;
+  }
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SetRender(bool render) {
+  if (render != render_started_) {
+    int ret;
+    if (render) {
+      ret = engine()->video_engine()->render()->StartRender(vie_channel_);
+    } else {
+      ret = engine()->video_engine()->render()->StopRender(vie_channel_);
+    }
+    if (ret != 0) {
+      return false;
+    }
+    render_started_ = render;
+  }
+  return true;
+}
+
// Starts or stops sending on this channel. When starting, also connects the
// channel to the engine's capture device if that has not happened yet.
// NOTE(review): if StartSend succeeds but ConnectCaptureDevice fails,
// sending_ stays false while ViE has already started sending — confirm
// whether that partial state is intended.
bool WebRtcVideoMediaChannel::SetSend(bool send) {
  if (send == sending()) {
    return true;  // no action required
  }

  bool ret = true;
  if (send) {  // enable
    if (engine()->video_engine()->base()->StartSend(vie_channel_) != 0) {
      LOG_RTCERR1(StartSend, vie_channel_);
      ret = false;
    }

    // If the channel has not been connected to the capturer yet,
    // connect it now.
    if (!connected()) {
      if (engine()->video_engine()->capture()->ConnectCaptureDevice(
          engine()->capture_id(), vie_channel_) != 0) {
        LOG_RTCERR2(ConnectCaptureDevice, engine()->capture_id(), vie_channel_);
        ret = false;
      } else {
        set_connected(true);
      }
    }
  } else {  // disable
    if (engine()->video_engine()->base()->StopSend(vie_channel_) != 0) {
      LOG_RTCERR1(StopSend, vie_channel_);
      ret = false;
    }
  }
  // Only record the new state if every step succeeded.
  if (ret) {
    sending_ = send;
  }

  return ret;
}
+
// Per-SSRC stream management is not implemented; always fails.
bool WebRtcVideoMediaChannel::AddStream(uint32 ssrc, uint32 voice_ssrc) {
  return false;
}

// Per-SSRC stream management is not implemented; always fails.
bool WebRtcVideoMediaChannel::RemoveStream(uint32 ssrc) {
  return false;
}
+
// Attaches |renderer| to the remote stream of this channel, replacing any
// previously installed renderer. Only ssrc 0 (the default stream) is
// supported. Returns false on an unsupported ssrc or any ViE failure.
bool WebRtcVideoMediaChannel::SetRenderer(
    uint32 ssrc, VideoRenderer* renderer) {
  ASSERT(vie_channel_ != -1);
  if (ssrc != 0)
    return false;
  if (remote_renderer_.get()) {
    // If the renderer already set, stop and remove it first
    if (engine_->video_engine()->render()->StopRender(vie_channel_) != 0) {
      LOG_RTCERR1(StopRender, vie_channel_);
    }
    if (engine_->video_engine()->render()->RemoveRenderer(vie_channel_) != 0) {
      LOG_RTCERR1(RemoveRenderer, vie_channel_);
    }
  }
  remote_renderer_.reset(new WebRtcRenderAdapter(renderer));

  if (engine_->video_engine()->render()->AddRenderer(vie_channel_,
      webrtc::kVideoI420, remote_renderer_.get()) != 0) {
    // Drop the adapter so we don't keep a renderer ViE knows nothing about.
    LOG_RTCERR3(AddRenderer, vie_channel_, webrtc::kVideoI420,
                remote_renderer_.get());
    remote_renderer_.reset();
    return false;
  }

  if (engine_->video_engine()->render()->StartRender(vie_channel_) != 0) {
    LOG_RTCERR1(StartRender, vie_channel_);
    return false;
  }

  return true;
}
+
+bool WebRtcVideoMediaChannel::GetStats(VideoMediaInfo* info) {
+  VideoSenderInfo sinfo;
+  memset(&sinfo, 0, sizeof(sinfo));
+
+  unsigned int ssrc;
+  if (engine_->video_engine()->rtp()->GetLocalSSRC(vie_channel_,
+                                                   ssrc) != 0) {
+    LOG_RTCERR2(GetLocalSSRC, vie_channel_, ssrc);
+    return false;
+  }
+  sinfo.ssrc = ssrc;
+
+  unsigned int cumulative_lost, extended_max, jitter;
+  int rtt_ms;
+  uint16 fraction_lost;
+
+  if (engine_->video_engine()->rtp()->GetReceivedRTCPStatistics(vie_channel_,
+          fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms) != 0) {
+    LOG_RTCERR6(GetReceivedRTCPStatistics, vie_channel_,
+        fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms);
+    return false;
+  }
+
+  sinfo.fraction_lost = fraction_lost;
+  sinfo.packets_lost = cumulative_lost;
+  sinfo.rtt_ms = rtt_ms;
+
+  unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
+  if (engine_->video_engine()->rtp()->GetRTPStatistics(vie_channel_,
+          bytes_sent, packets_sent, bytes_recv, packets_recv) != 0) {
+    LOG_RTCERR5(GetRTPStatistics, vie_channel_,
+        bytes_sent, packets_sent, bytes_recv, packets_recv);
+    return false;
+  }
+  sinfo.packets_sent = packets_sent;
+  sinfo.bytes_sent = bytes_sent;
+  sinfo.packets_lost = -1;
+  sinfo.packets_cached = -1;
+
+  info->senders.push_back(sinfo);
+
+  // build receiver info.
+  // reusing the above local variables
+  VideoReceiverInfo rinfo;
+  memset(&rinfo, 0, sizeof(rinfo));
+  if (engine_->video_engine()->rtp()->GetSentRTCPStatistics(vie_channel_,
+          fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms) != 0) {
+    LOG_RTCERR6(GetSentRTCPStatistics, vie_channel_,
+        fraction_lost, cumulative_lost, extended_max, jitter, rtt_ms);
+    return false;
+  }
+  rinfo.bytes_rcvd = bytes_recv;
+  rinfo.packets_rcvd = packets_recv;
+  rinfo.fraction_lost = fraction_lost;
+  rinfo.packets_lost = cumulative_lost;
+
+  if (engine_->video_engine()->rtp()->GetRemoteSSRC(vie_channel_,
+                                                    ssrc) != 0) {
+    return false;
+  }
+  rinfo.ssrc = ssrc;
+
+  // Get codec for wxh
+  info->receivers.push_back(rinfo);
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SendIntraFrame() {
+  bool ret = true;
+  if (engine()->video_engine()->codec()->SendKeyFrame(vie_channel_) != 0) {
+    LOG_RTCERR1(SendKeyFrame, vie_channel_);
+    ret = false;
+  }
+
+  return ret;
+}
+
// Always returns false: ViE exposes no application-level API to request a
// key frame from the remote side.
bool WebRtcVideoMediaChannel::RequestIntraFrame() {
  // There is no API exposed to application to request a key frame
  // ViE does this internally when there are errors from decoder
  return false;
}
+
+void WebRtcVideoMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
+  engine()->video_engine()->network()->ReceivedRTPPacket(vie_channel_,
+                                                         packet->data(),
+                                                         packet->length());
+}
+
+void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
+  engine_->video_engine()->network()->ReceivedRTCPPacket(vie_channel_,
+                                                         packet->data(),
+                                                         packet->length());
+}
+
+void WebRtcVideoMediaChannel::SetSendSsrc(uint32 id) {
+  if (!sending_) {
+    if (engine()->video_engine()->rtp()->SetLocalSSRC(vie_channel_,
+                                                      id) != 0) {
+      LOG_RTCERR1(SetLocalSSRC, vie_channel_);
+    }
+  } else {
+    LOG(LS_ERROR) << "Channel already in send state";
+  }
+}
+
+bool WebRtcVideoMediaChannel::SetRtcpCName(const std::string& cname) {
+  if (engine()->video_engine()->rtp()->SetRTCPCName(vie_channel_,
+                                                    cname.c_str()) != 0) {
+    LOG_RTCERR2(SetRTCPCName, vie_channel_, cname.c_str());
+    return false;
+  }
+  return true;
+}
+
// Not implemented; always fails.
// TODO: decide whether muting should map to stopping the send stream.
bool WebRtcVideoMediaChannel::Mute(bool on) {
  // stop send??
  return false;
}
+
+bool WebRtcVideoMediaChannel::SetSendBandwidth(bool autobw, int bps) {
+  LOG(LS_INFO) << "RtcVideoMediaChanne::SetSendBandwidth";
+
+  if (!send_codec_.get()) {
+    LOG(LS_INFO) << "The send codec has not been set up yet.";
+    return true;
+  }
+
+  if (!autobw) {
+    send_codec_->startBitrate = bps;
+    send_codec_->minBitrate = bps;
+  }
+  send_codec_->maxBitrate = bps;
+
+  if (engine()->video_engine()->codec()->SetSendCodec(vie_channel_,
+      *send_codec_.get()) != 0) {
+    LOG_RTCERR2(SetSendCodec, vie_channel_, send_codec_->plName);
+    return false;
+  }
+
+  return true;
+}
+
// No channel-level options are supported yet; all values are accepted and
// ignored.
bool WebRtcVideoMediaChannel::SetOptions(int options) {
  return true;
}
+
// Turns on compound RTCP (RFC 4585) for this channel. The ViE return value
// is ignored.
void WebRtcVideoMediaChannel::EnableRtcp() {
  engine()->video_engine()->rtp()->SetRTCPStatus(
      vie_channel_, webrtc::kRtcpCompound_RFC4585);
}

// Requests key frames via RTCP Picture Loss Indication after packet loss.
// The ViE return value is ignored.
void WebRtcVideoMediaChannel::EnablePLI() {
  engine_->video_engine()->rtp()->SetKeyFrameRequestMethod(
      vie_channel_, webrtc::kViEKeyFrameRequestPliRtcp);
}

// Enables Temporary Maximum Media Stream Bit Rate Request (RFC 5104)
// signaling on this channel. The ViE return value is ignored.
void WebRtcVideoMediaChannel::EnableTMMBR() {
  engine_->video_engine()->rtp()->SetTMMBRStatus(vie_channel_, true);
}
+
+int WebRtcVideoMediaChannel::SendPacket(int channel, const void* data,
+                                        int len) {
+  if (!network_interface_) {
+    return -1;
+  }
+  talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+  return network_interface_->SendPacket(&packet) ? len : -1;
+}
+
+int WebRtcVideoMediaChannel::SendRTCPPacket(int channel,
+                                         const void* data,
+                                         int len) {
+  if (!network_interface_) {
+    return -1;
+  }
+  talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+  return network_interface_->SendRtcp(&packet) ? len : -1;
+}
+
+}  // namespace cricket
+
+#endif  // HAVE_WEBRTC_VIDEO
+
diff --git a/talk/session/phone/webrtcvideoengine.h b/talk/session/phone/webrtcvideoengine.h
new file mode 100644
index 0000000..8daa81c
--- /dev/null
+++ b/talk/session/phone/webrtcvideoengine.h
@@ -0,0 +1,207 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
+#define TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
+
+#include <vector>
+
+#include "talk/base/scoped_ptr.h"
+#include "talk/session/phone/videocommon.h"
+#include "talk/session/phone/codec.h"
+#include "talk/session/phone/channel.h"
+#include "talk/session/phone/webrtccommon.h"
+
+namespace webrtc {
+class VideoCaptureModule;
+class VideoRender;
+}
+
+namespace cricket {
+struct Device;
+class VideoCapturer;
+class VideoRenderer;
+class ViEWrapper;
+class VoiceMediaChannel;
+class WebRtcRenderAdapter;
+class WebRtcVideoMediaChannel;
+class WebRtcVoiceEngine;
+
// Video engine backed by WebRTC ViE. Owns the capture device, the local
// render module, the supported-codec list, and the registry of media
// channels; observes ViE base events and receives ViE trace output.
class WebRtcVideoEngine : public webrtc::ViEBaseObserver,
                          public webrtc::TraceCallback {
 public:
  // Creates the WebRtcVideoEngine with internal VideoCaptureModule.
  WebRtcVideoEngine();
  // Creates the WebRtcVideoEngine, and specifies the WebRtcVoiceEngine and
  // external VideoCaptureModule to use.
  WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                    webrtc::VideoCaptureModule* capture);
  // For testing purposes. Allows the WebRtcVoiceEngine and
  // ViEWrapper to be mocks.
  WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine, ViEWrapper* vie_wrapper);
  ~WebRtcVideoEngine();

  bool Init();
  void Terminate();

  // Creates a new media channel; returns NULL if initialization fails.
  WebRtcVideoMediaChannel* CreateChannel(
      VoiceMediaChannel* voice_channel);
  // True when the codec (name and resolution) is receivable by this engine.
  bool FindCodec(const VideoCodec& in);
  bool SetDefaultEncoderConfig(const VideoEncoderConfig& config);

  // Channel registry maintenance; called by the channel ctor/dtor.
  void RegisterChannel(WebRtcVideoMediaChannel* channel);
  void UnregisterChannel(WebRtcVideoMediaChannel* channel);

  ViEWrapper* video_engine() { return vie_wrapper_.get(); }
  int GetLastVideoEngineError();
  int GetCapabilities();
  bool SetOptions(int options);
  bool SetCaptureDevice(const Device* device);
  bool SetCaptureModule(webrtc::VideoCaptureModule* vcm);
  // -1 while no capture device is allocated.
  int capture_id() const { return capture_id_; }
  bool SetLocalRenderer(VideoRenderer* renderer);
  CaptureResult SetCapture(bool capture);
  const std::vector<VideoCodec>& codecs() const;
  void SetLogging(int min_sev, const char* filter);

  int GetLastEngineError();

  VideoEncoderConfig& default_encoder_config() {
    return default_encoder_config_;
  }

  void ConvertToCricketVideoCodec(const webrtc::VideoCodec& in_codec,
                                  VideoCodec& out_codec);

  bool ConvertFromCricketVideoCodec(const VideoCodec& in_codec,
                                    webrtc::VideoCodec& out_codec);

  sigslot::signal2<VideoCapturer*, CaptureResult> SignalCaptureResult;

 private:
  // One row of the static codec preference table.
  struct VideoCodecPref {
    const char* name;
    int payload_type;
    int pref;
  };

  static const VideoCodecPref kVideoCodecPrefs[];
  static const VideoFormat kVideoFormats[];
  static const VideoFormat kDefaultVideoFormat;

  void Construct();
  bool SetDefaultCodec(const VideoCodec& codec);
  bool RebuildCodecList(const VideoCodec& max_codec);

  void ApplyLogging();
  bool InitVideoEngine();
  // webrtc::ViEBaseObserver callback.
  void PerformanceAlarm(const unsigned int cpu_load);
  bool ReleaseCaptureDevice();
  // webrtc::TraceCallback: routes ViE trace output into talk_base logging.
  virtual void Print(const webrtc::TraceLevel level, const char* trace_string,
                     const int length);

  typedef std::vector<WebRtcVideoMediaChannel*> VideoChannels;

  talk_base::scoped_ptr<ViEWrapper> vie_wrapper_;
  webrtc::VideoCaptureModule* capture_module_;
  bool external_capture_;
  // -1 when no capture device is allocated.
  int capture_id_;
  talk_base::scoped_ptr<webrtc::VideoRender> render_module_;
  WebRtcVoiceEngine* voice_engine_;
  std::vector<VideoCodec> video_codecs_;
  VideoChannels channels_;
  int log_level_;
  VideoEncoderConfig default_encoder_config_;
  bool capture_started_;
  talk_base::scoped_ptr<WebRtcRenderAdapter> local_renderer_;
};
+
// A single video media channel on top of one ViE channel. Implements
// webrtc::Transport so outgoing RTP/RTCP from ViE is routed through the
// cricket network interface.
class WebRtcVideoMediaChannel : public VideoMediaChannel,
                                public webrtc::Transport {
 public:
  WebRtcVideoMediaChannel(
      WebRtcVideoEngine* engine, VoiceMediaChannel* voice_channel);
  ~WebRtcVideoMediaChannel();

  // Creates the underlying ViE channel; must succeed before any other call.
  bool Init();
  virtual bool SetRecvCodecs(const std::vector<VideoCodec> &codecs);
  virtual bool SetSendCodecs(const std::vector<VideoCodec> &codecs);
  virtual bool SetRender(bool render);
  virtual bool SetSend(bool send);
  virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc);
  virtual bool RemoveStream(uint32 ssrc);
  virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
  virtual bool GetStats(VideoMediaInfo* info);
  virtual bool SendIntraFrame();
  virtual bool RequestIntraFrame();

  virtual void OnPacketReceived(talk_base::Buffer* packet);
  virtual void OnRtcpReceived(talk_base::Buffer* packet);
  virtual void SetSendSsrc(uint32 id);
  virtual bool SetRtcpCName(const std::string& cname);
  virtual bool Mute(bool on);
  // RTP header extensions are not supported yet.
  virtual bool SetRecvRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions) {
    return false;
  }
  virtual bool SetSendRtpHeaderExtensions(
      const std::vector<RtpHeaderExtension>& extensions) {
    return false;
  }
  virtual bool SetSendBandwidth(bool autobw, int bps);
  virtual bool SetOptions(int options);

  WebRtcVideoEngine* engine() const { return engine_; }
  VoiceMediaChannel* voice_channel() const { return voice_channel_; }
  // -1 until Init() creates the ViE channel.
  int video_channel() const { return vie_channel_; }
  bool sending() const { return sending_; }
  void set_connected(bool connected) { connected_ = connected; }
  bool connected() const { return connected_; }

 protected:
  int GetLastEngineError() { return engine()->GetLastEngineError(); }
  // webrtc::Transport implementation.
  virtual int SendPacket(int channel, const void* data, int len);
  virtual int SendRTCPPacket(int channel, const void* data, int len);

 private:
  void EnableRtcp();
  void EnablePLI();
  void EnableTMMBR();

  WebRtcVideoEngine* engine_;
  VoiceMediaChannel* voice_channel_;
  int vie_channel_;
  bool sending_;
  // connected to the capture device or not.
  bool connected_;
  bool render_started_;
  talk_base::scoped_ptr<webrtc::VideoCodec> send_codec_;
  talk_base::scoped_ptr<WebRtcRenderAdapter> remote_renderer_;
};
+}  // namespace cricket
+
+#endif  // TALK_SESSION_PHONE_WEBRTCVIDEOENGINE_H_
diff --git a/talk/session/phone/webrtcvideoengine_unittest.cc b/talk/session/phone/webrtcvideoengine_unittest.cc
new file mode 100644
index 0000000..8e985d7
--- /dev/null
+++ b/talk/session/phone/webrtcvideoengine_unittest.cc
@@ -0,0 +1,183 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Author: Ronghua Wu (ronghuawu@google.com)
+//         Zhurun Zhang (zhurunz@google.com)
+
+#include "talk/base/byteorder.h"
+#include "talk/base/gunit.h"
+#include "talk/session/phone/channel.h"
+#include "talk/session/phone/fakemediaengine.h"
+#include "talk/session/phone/fakertp.h"
+#include "talk/session/phone/fakesession.h"
+#include "talk/session/phone/fakewebrtcvideoengine.h"
+#include "talk/session/phone/fakewebrtcvoiceengine.h"
+#include "talk/session/phone/webrtcvideoengine.h"
+#include "talk/session/phone/webrtcvoiceengine.h"
+
+// Tests for the WebRtcVideoEngine/VideoChannel code.
+
+static const cricket::VideoCodec kVP8Codec(104, "VP8", 320, 200, 30, 0);
+static const cricket::VideoCodec* const kVideoCodecs[] = {
+    &kVP8Codec,
+};
+
+class FakeViEWrapper : public cricket::ViEWrapper {
+ public:
+  explicit FakeViEWrapper(cricket::FakeWebRtcVideoEngine* engine)
+      : cricket::ViEWrapper(engine, engine, engine, engine,
+                            engine, engine, engine) {
+  }
+};
+
+class WebRtcVideoEngineTest : public testing::Test {
+ public:
+  class ChannelErrorListener : public sigslot::has_slots<> {
+   public:
+    explicit ChannelErrorListener(cricket::WebRtcVideoMediaChannel* channel)
+        : ssrc_(0), error_(cricket::WebRtcVideoMediaChannel::ERROR_NONE) {
+      ASSERT(channel != NULL);
+      channel->SignalMediaError.connect(
+          this, &ChannelErrorListener::OnVideoChannelError);
+    }
+    void OnVideoChannelError(uint32 ssrc,
+                             cricket::WebRtcVideoMediaChannel::Error error) {
+      ssrc_ = ssrc;
+      error_ = error;
+    }
+    void Reset() {
+      ssrc_ = 0;
+      error_ = cricket::WebRtcVideoMediaChannel::ERROR_NONE;
+    }
+    uint32 ssrc() const {
+      return ssrc_;
+    }
+    cricket::WebRtcVideoMediaChannel::Error error() const {
+      return error_;
+    }
+
+   private:
+    uint32 ssrc_;
+    cricket::WebRtcVideoMediaChannel::Error error_;
+  };
+
+  WebRtcVideoEngineTest()
+      : vie_(kVideoCodecs, ARRAY_SIZE(kVideoCodecs)),
+        engine_(NULL,  // cricket::WebRtcVoiceEngine
+                new FakeViEWrapper(&vie_)),
+        channel_(NULL),
+        voice_channel_(NULL) {
+  }
+  bool SetupEngine() {
+    bool result = engine_.Init();
+    if (result) {
+      channel_ = engine_.CreateChannel(voice_channel_);
+      result = (channel_ != NULL);
+    }
+    return result;
+  }
+  void DeliverPacket(const void* data, int len) {
+    talk_base::Buffer packet(data, len);
+    channel_->OnPacketReceived(&packet);
+  }
+  virtual void TearDown() {
+    delete channel_;
+    engine_.Terminate();
+  }
+
+ protected:
+  cricket::FakeWebRtcVideoEngine vie_;
+  cricket::WebRtcVideoEngine engine_;
+  cricket::WebRtcVideoMediaChannel* channel_;
+  cricket::WebRtcVoiceMediaChannel* voice_channel_;
+};
+
+// Tests that our stub library "works".
+TEST_F(WebRtcVideoEngineTest, StartupShutdown) {
+  EXPECT_FALSE(vie_.IsInited());
+  EXPECT_TRUE(engine_.Init());
+  EXPECT_TRUE(vie_.IsInited());
+  engine_.Terminate();
+  // TODO: what to expect after Terminate
+  // EXPECT_FALSE(vie_.IsInited());
+}
+
+// Tests that we can create and destroy a channel.
+TEST_F(WebRtcVideoEngineTest, CreateChannel) {
+  EXPECT_TRUE(engine_.Init());
+  channel_ = engine_.CreateChannel(voice_channel_);
+  EXPECT_TRUE(channel_ != NULL);
+}
+
+// Tests that we properly handle failures in CreateChannel.
+TEST_F(WebRtcVideoEngineTest, CreateChannelFail) {
+  vie_.set_fail_create_channel(true);
+  EXPECT_TRUE(engine_.Init());
+  channel_ = engine_.CreateChannel(voice_channel_);
+  EXPECT_TRUE(channel_ == NULL);
+}
+
+// Tests that we can find codecs by name or id
+TEST_F(WebRtcVideoEngineTest, FindCodec) {
+  // We should not need to init engine in order to get codecs.
+  const std::vector<cricket::VideoCodec>& c = engine_.codecs();
+  EXPECT_EQ(1U, c.size());
+
+  cricket::VideoCodec vp8(104, "VP8", 320, 200, 30, 0);
+  EXPECT_TRUE(engine_.FindCodec(vp8));
+
+  cricket::VideoCodec vp8_diff_fr_diff_pref(104, "VP8", 320, 200, 50, 50);
+  EXPECT_TRUE(engine_.FindCodec(vp8_diff_fr_diff_pref));
+
+  cricket::VideoCodec vp8_diff_id(95, "VP8", 320, 200, 30, 0);
+  EXPECT_FALSE(engine_.FindCodec(vp8_diff_id));
+  vp8_diff_id.id = 97;
+  EXPECT_TRUE(engine_.FindCodec(vp8_diff_id));
+
+  cricket::VideoCodec vp8_diff_res(104, "VP8", 320, 111, 30, 0);
+  EXPECT_FALSE(engine_.FindCodec(vp8_diff_res));
+}
+
+// Test that we set our inbound codecs properly
+TEST_F(WebRtcVideoEngineTest, SetRecvCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::VideoCodec> codecs;
+  codecs.push_back(kVP8Codec);
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+}
+
+// Test that we apply codecs properly.
+TEST_F(WebRtcVideoEngineTest, SetSendCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = vie_.GetLastChannel();
+  std::vector<cricket::VideoCodec> codecs;
+  codecs.push_back(kVP8Codec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::VideoCodec gcodec;
+  EXPECT_EQ(0, vie_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(kVP8Codec.id, gcodec.plType);
+  EXPECT_EQ(kVP8Codec.width, gcodec.width);
+  EXPECT_EQ(kVP8Codec.height, gcodec.height);
+  EXPECT_STREQ(kVP8Codec.name.c_str(), gcodec.plName);
+}
+
+// TODO: add tests for below interfaces
+// bool SetOptions(int options);
+// bool SetCaptureDevice(const Device* device);
+// bool SetLocalRenderer(VideoRenderer* renderer);
+// CaptureResult SetCapture(bool capture);
+// virtual bool SetRender(bool render);
+// virtual bool SetSend(bool send);
+// virtual bool AddStream(uint32 ssrc, uint32 voice_ssrc);
+// virtual bool RemoveStream(uint32 ssrc);
+// virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
+// virtual bool GetStats(VideoMediaInfo* info);
+// virtual bool SendIntraFrame();
+// virtual bool RequestIntraFrame();
+// virtual void OnPacketReceived(talk_base::Buffer* packet);
+// virtual void OnRtcpReceived(talk_base::Buffer* packet);
+// virtual void SetSendSsrc(uint32 id);
+// virtual bool SetRtcpCName(const std::string& cname);
+// virtual bool Mute(bool on);
+// virtual bool SetSendBandwidth(bool autobw, int bps);
+// virtual bool SetOptions(int options);
+
diff --git a/talk/session/phone/webrtcvideoframe.cc b/talk/session/phone/webrtcvideoframe.cc
new file mode 100644
index 0000000..c2a3470
--- /dev/null
+++ b/talk/session/phone/webrtcvideoframe.cc
@@ -0,0 +1,244 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/phone/webrtcvideoframe.h"
+
+#include "talk/base/logging.h"
+#include "talk/session/phone/videocommon.h"
+#ifdef WEBRTC_RELATIVE_PATH
+#include "common_video/vplib/main/interface/vplib.h"
+#else
+#include "third_party/webrtc/files/include/vplib.h"
+#endif
+
+namespace cricket {
+WebRtcVideoFrame::WebRtcVideoFrame() {
+}
+
+WebRtcVideoFrame::~WebRtcVideoFrame() {
+}
+
+void WebRtcVideoFrame::Attach(uint8* buffer, size_t buffer_size, size_t w,
+                              size_t h, int64 elapsed_time, int64 time_stamp) {
+  video_frame_.Free();
+  WebRtc_UWord8* new_memory = buffer;
+  WebRtc_UWord32 new_length = buffer_size;
+  WebRtc_UWord32 new_size = buffer_size;
+  video_frame_.Swap(new_memory, new_length, new_size);
+  video_frame_.SetWidth(w);
+  video_frame_.SetHeight(h);
+  elapsed_time_ = elapsed_time;
+  video_frame_.SetTimeStamp(static_cast<WebRtc_UWord32>(time_stamp));
+}
+
+void WebRtcVideoFrame::Detach(uint8** buffer, size_t* buffer_size) {
+  WebRtc_UWord8* new_memory = NULL;
+  WebRtc_UWord32 new_length = 0;
+  WebRtc_UWord32 new_size = 0;
+  video_frame_.Swap(new_memory, new_length, new_size);
+  *buffer = new_memory;
+  *buffer_size = new_size;
+}
+
+bool WebRtcVideoFrame::InitToBlack(size_t w, size_t h,
+                                   int64 elapsed_time, int64 time_stamp) {
+  size_t buffer_size = w * h * 3 / 2;
+  uint8* buffer = new uint8[buffer_size];
+  Attach(buffer, buffer_size, w, h, elapsed_time, time_stamp);
+  memset(GetYPlane(), 16, w * h);
+  memset(GetUPlane(), 128, w * h / 4);
+  memset(GetVPlane(), 128, w * h / 4);
+  return true;
+}
+
+size_t WebRtcVideoFrame::GetWidth() const {
+  return video_frame_.Width();
+}
+
+size_t WebRtcVideoFrame::GetHeight() const {
+  return video_frame_.Height();
+}
+
+const uint8* WebRtcVideoFrame::GetYPlane() const {
+  WebRtc_UWord8* buffer = video_frame_.Buffer();
+  return buffer;
+}
+
+const uint8* WebRtcVideoFrame::GetUPlane() const {
+  WebRtc_UWord8* buffer = video_frame_.Buffer();
+  if (buffer)
+    buffer += (video_frame_.Width() * video_frame_.Height());
+  return buffer;
+}
+
+const uint8* WebRtcVideoFrame::GetVPlane() const {
+  WebRtc_UWord8* buffer = video_frame_.Buffer();
+  if (buffer)
+    buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
+  return buffer;
+}
+
+uint8* WebRtcVideoFrame::GetYPlane() {
+  WebRtc_UWord8* buffer = video_frame_.Buffer();
+  return buffer;
+}
+
+uint8* WebRtcVideoFrame::GetUPlane() {
+  WebRtc_UWord8* buffer = video_frame_.Buffer();
+  if (buffer)
+    buffer += (video_frame_.Width() * video_frame_.Height());
+  return buffer;
+}
+
+uint8* WebRtcVideoFrame::GetVPlane() {
+  WebRtc_UWord8* buffer = video_frame_.Buffer();
+  if (buffer)
+    buffer += (video_frame_.Width() * video_frame_.Height() * 5 / 4);
+  return buffer;
+}
+
+VideoFrame* WebRtcVideoFrame::Copy() const {
+  WebRtc_UWord8* buffer = video_frame_.Buffer();
+  if (!buffer)
+    return NULL;
+
+  size_t new_buffer_size = video_frame_.Length();
+  uint8* new_buffer = new uint8[new_buffer_size];
+  memcpy(new_buffer, buffer, new_buffer_size);
+  WebRtcVideoFrame* copy = new WebRtcVideoFrame();
+  copy->Attach(new_buffer, new_buffer_size,
+               video_frame_.Width(), video_frame_.Height(),
+               elapsed_time_, video_frame_.TimeStamp());
+  return copy;
+}
+
+bool WebRtcVideoFrame::MakeExclusive() {
+  // WebRtcVideoFrame::Copy makes a deep copy of the frame buffer.  No action
+  // is needed for MakeExclusive.
+  return true;
+}
+
+size_t WebRtcVideoFrame::CopyToBuffer(
+    uint8* buffer, size_t size) const {
+  if (!video_frame_.Buffer()) {
+    return 0;
+  }
+
+  size_t needed = video_frame_.Length();
+  if (needed <= size) {
+    memcpy(buffer, video_frame_.Buffer(), needed);
+  }
+  return needed;
+}
+
+size_t WebRtcVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc,
+                                            uint8* buffer,
+                                            size_t size,
+                                            size_t pitch_rgb) const {
+  if (!video_frame_.Buffer()) {
+    return 0;
+  }
+
+  size_t width = video_frame_.Width();
+  size_t height = video_frame_.Height();
+  // See http://www.virtualdub.org/blog/pivot/entry.php?id=190 for a good
+  // explanation of pitch and why this is the amount of space we need.
+  size_t needed = pitch_rgb * (height - 1) + 4 * width;
+
+  if (needed > size) {
+    LOG(LS_WARNING) << "RGB buffer is not large enough";
+    return 0;
+  }
+
+  webrtc::VideoType outgoingVideoType = webrtc::kUnknown;
+  switch (to_fourcc) {
+    case FOURCC_ARGB:
+      outgoingVideoType = webrtc::kARGB;
+      break;
+    default:
+      LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
+      return 0;
+      break;
+  }
+
+  if (outgoingVideoType != webrtc::kUnknown)
+    webrtc::ConvertFromI420(outgoingVideoType, video_frame_.Buffer(),
+                    width, height, buffer);
+
+  return needed;
+}
+
+void WebRtcVideoFrame::StretchToPlanes(
+    uint8* y, uint8* u, uint8* v,
+    int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v,
+    size_t width, size_t height, bool interpolate, bool crop) const {
+  // TODO: Implement StretchToPlanes
+}
+
+size_t WebRtcVideoFrame::StretchToBuffer(size_t w, size_t h,
+                                         uint8* buffer, size_t size,
+                                         bool interpolate,
+                                         bool crop) const {
+  if (!video_frame_.Buffer()) {
+    return 0;
+  }
+
+  size_t needed = video_frame_.Length();
+
+  if (needed <= size) {
+    uint8* bufy = buffer;
+    uint8* bufu = bufy + w * h;
+    uint8* bufv = bufu + ((w + 1) >> 1) * ((h + 1) >> 1);
+    StretchToPlanes(bufy, bufu, bufv, w, (w + 1) >> 1, (w + 1) >> 1, w, h,
+                    interpolate, crop);
+  }
+  return needed;
+}
+
+void WebRtcVideoFrame::StretchToFrame(VideoFrame* target,
+    bool interpolate, bool crop) const {
+  if (!target) return;
+
+  StretchToPlanes(target->GetYPlane(),
+                  target->GetUPlane(),
+                  target->GetVPlane(),
+                  target->GetYPitch(),
+                  target->GetUPitch(),
+                  target->GetVPitch(),
+                  target->GetWidth(),
+                  target->GetHeight(),
+                  interpolate, crop);
+  target->SetElapsedTime(GetElapsedTime());
+  target->SetTimeStamp(GetTimeStamp());
+}
+
+VideoFrame* WebRtcVideoFrame::Stretch(size_t w, size_t h,
+    bool interpolate, bool crop) const {
+  // TODO: implement
+  return NULL;
+}
+}  // namespace cricket
diff --git a/talk/session/phone/webrtcvideoframe.h b/talk/session/phone/webrtcvideoframe.h
new file mode 100644
index 0000000..6538fbc
--- /dev/null
+++ b/talk/session/phone/webrtcvideoframe.h
@@ -0,0 +1,98 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
+#define TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
+
+#ifdef WEBRTC_RELATIVE_PATH
+#include "common_types.h"
+#include "modules/interface/module_common_types.h"
+#else
+#include "third_party/webrtc/files/include/common_types.h"
+#include "third_party/webrtc/files/include/module_common_types.h"
+#endif
+#include "talk/session/phone/videoframe.h"
+
+namespace cricket {
+// WebRtcVideoFrame only supports I420
+class WebRtcVideoFrame : public VideoFrame {
+ public:
+  WebRtcVideoFrame();
+  ~WebRtcVideoFrame();
+
+  void Attach(uint8* buffer, size_t buffer_size,
+              size_t w, size_t h, int64 elapsed_time, int64 time_stamp);
+  void Detach(uint8** buffer, size_t* buffer_size);
+  bool InitToBlack(size_t w, size_t h, int64 elapsed_time, int64 time_stamp);
+  bool HasImage() const { return video_frame_.Buffer() != NULL; }
+
+  virtual size_t GetWidth() const;
+  virtual size_t GetHeight() const;
+  virtual const uint8* GetYPlane() const;
+  virtual const uint8* GetUPlane() const;
+  virtual const uint8* GetVPlane() const;
+  virtual uint8* GetYPlane();
+  virtual uint8* GetUPlane();
+  virtual uint8* GetVPlane();
+  virtual int32 GetYPitch() const { return video_frame_.Width(); }
+  virtual int32 GetUPitch() const { return video_frame_.Width() / 2; }
+  virtual int32 GetVPitch() const { return video_frame_.Width() / 2; }
+
+  virtual size_t GetPixelWidth() const { return 1; }
+  virtual size_t GetPixelHeight() const { return 1; }
+  virtual int64 GetElapsedTime() const { return elapsed_time_; }
+  virtual int64 GetTimeStamp() const { return video_frame_.TimeStamp(); }
+  virtual void SetElapsedTime(int64 elapsed_time) {
+    elapsed_time_ = elapsed_time;
+  }
+  virtual void SetTimeStamp(int64 time_stamp) {
+    video_frame_.SetTimeStamp(static_cast<WebRtc_UWord32>(time_stamp));
+  }
+
+  virtual VideoFrame* Copy() const;
+  virtual bool MakeExclusive();
+  virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
+  virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
+                                    size_t size, size_t pitch_rgb) const;
+  virtual void StretchToPlanes(uint8* y, uint8* u, uint8* v,
+                               int32 pitchY, int32 pitchU, int32 pitchV,
+                               size_t width, size_t height,
+                               bool interpolate, bool crop) const;
+  virtual size_t StretchToBuffer(size_t w, size_t h, uint8* buffer, size_t size,
+                                 bool interpolate, bool crop) const;
+  virtual void StretchToFrame(VideoFrame* target, bool interpolate,
+                              bool crop) const;
+  virtual VideoFrame* Stretch(size_t w, size_t h, bool interpolate,
+                              bool crop) const;
+
+ private:
+  webrtc::VideoFrame video_frame_;
+  int64 elapsed_time_;
+};
+}  // namespace cricket
+
+#endif  // TALK_SESSION_PHONE_WEBRTCVIDEOFRAME_H_
diff --git a/talk/session/phone/webrtcvideoframe_unittest.cc b/talk/session/phone/webrtcvideoframe_unittest.cc
new file mode 100644
index 0000000..4bd9244
--- /dev/null
+++ b/talk/session/phone/webrtcvideoframe_unittest.cc
@@ -0,0 +1,443 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Author: Justin Uberti (juberti@google.com)
+//         Frank Barchard (fbarchard@google.com)
+#include <string>
+
+#include "talk/base/flags.h"
+#include "talk/base/gunit.h"
+#include "talk/base/pathutils.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stream.h"
+#include "talk/base/stringutils.h"
+#include "talk/session/phone/formatconversion.h"
+#include "talk/session/phone/webrtcvideoframe.h"
+#include "talk/session/phone/testutils.h"
+#include "talk/session/phone/videocommon.h"
+
+enum {
+  ROTATION_0 = 0,
+  ROTATION_90 = 90,
+  ROTATION_180 = 180,
+  ROTATION_270 = 270
+};
+
+using cricket::WebRtcVideoFrame;
+using cricket::FOURCC_I420;
+
+static const int kWidth = 1280;
+static const int kHeight = 720;
+static const int kAlignment = 16;
+static const std::string kImageFilename = "faces.1280x720_P420.yuv";
+
+DEFINE_int(yuvconverter_repeat, 1,
+    "how many times to perform each conversion operation (for perf testing)");
+
+class WebRtcVideoFrameTest : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    repeat_ = FLAG_yuvconverter_repeat;
+  }
+
+ public:
+  // Load a video frame from disk or a buffer.
+  bool LoadFrame(const std::string& filename, uint32 format,
+                 int32 width, int32 height, WebRtcVideoFrame* frame,
+                 int rotation) {
+    talk_base::scoped_ptr<talk_base::MemoryStream> ms(LoadSample(filename));
+    return LoadFrame(ms.get(), format, width, height, frame, rotation);
+  }
+
+  bool LoadFrame(talk_base::MemoryStream* ms, uint32 format,
+                 int32 width, int32 height, WebRtcVideoFrame* frame,
+                 int rotation) {
+    if (!ms) {
+      return false;
+    }
+    size_t data_size;
+    bool ret = ms->GetSize(&data_size);
+    EXPECT_TRUE(ret);
+    if (ret) {
+      ret = LoadFrame(reinterpret_cast<uint8*>(ms->GetBuffer()), data_size,
+                      format, width, height, frame, rotation);
+    }
+    return ret;
+  }
+
+  bool LoadFrame(uint8* sample, size_t sample_size, uint32 format,
+                 int32 width, int32 height, WebRtcVideoFrame* frame,
+                 int rotation) {
+    // WebRtcVideoFrame only supports I420 for now.
+    if (format != FOURCC_I420)
+      return false;
+    for (int i = 0; i < repeat_; ++i) {
+      uint8* new_buffer = new uint8[sample_size];
+      memcpy(new_buffer, sample, sample_size);
+      frame->Attach(new_buffer, sample_size, width, height, 0, 0);
+    }
+    return true;
+  }
+
+  talk_base::MemoryStream* LoadSample(const std::string& filename) {
+    talk_base::Pathname path(cricket::GetTestFilePath(filename));
+    talk_base::scoped_ptr<talk_base::FileStream> fs(
+        talk_base::Filesystem::OpenFile(path, "rb"));
+    if (!fs.get()) {
+      return NULL;
+    }
+
+    char buf[4096];
+    talk_base::scoped_ptr<talk_base::MemoryStream> ms(
+        new talk_base::MemoryStream());
+    talk_base::StreamResult res = Flow(fs.get(), buf, sizeof(buf), ms.get());
+    if (res != talk_base::SR_SUCCESS) {
+      return NULL;
+    }
+
+    return ms.release();
+  }
+
+  // Write an I420 frame out to disk.
+  bool DumpFrame(const std::string& prefix,
+                 const WebRtcVideoFrame& frame) {
+    char filename[256];
+    talk_base::sprintfn(filename, sizeof(filename), "%s.%dx%d_P420.yuv",
+                        prefix.c_str(), frame.GetWidth(), frame.GetHeight());
+    size_t out_size = cricket::VideoFrame::SizeOf(frame.GetWidth(),
+                                                  frame.GetHeight());
+    talk_base::scoped_array<uint8> out(new uint8[out_size]);
+    frame.CopyToBuffer(out.get(), out_size);
+    return DumpSample(filename, out.get(), out_size);
+  }
+
+  bool DumpSample(const std::string& filename, const void* buffer, int size) {
+    talk_base::Pathname path(filename);
+    talk_base::scoped_ptr<talk_base::FileStream> fs(
+        talk_base::Filesystem::OpenFile(path, "wb"));
+    if (!fs.get()) {
+      return false;
+    }
+
+    return (fs->Write(buffer, size, NULL, NULL) == talk_base::SR_SUCCESS);
+  }
+
+  // Create a test image for YUV 420 formats with 12 bits per pixel.
+  talk_base::MemoryStream* CreateYuv420Sample(uint32 width, uint32 height) {
+    talk_base::scoped_ptr<talk_base::MemoryStream> ms(
+        new talk_base::MemoryStream);
+    if (!ms->ReserveSize(width * height * 12 / 8)) {
+      return NULL;
+    }
+
+    for (uint32 i = 0; i < width * height * 12 / 8; ++i) {
+      char value = ((i / 63) & 1) ? 192 : 64;
+      ms->Write(&value, sizeof(value), NULL, NULL);
+    }
+    return ms.release();
+  }
+
+  talk_base::MemoryStream* CreateRgbSample(uint32 fourcc,
+                                           uint32 width, uint32 height) {
+    int r_pos, g_pos, b_pos, bytes;
+    if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) {
+      return NULL;
+    }
+
+    talk_base::scoped_ptr<talk_base::MemoryStream> ms(
+        new talk_base::MemoryStream);
+    if (!ms->ReserveSize(width * height * bytes)) {
+      return NULL;
+    }
+
+    for (uint32 y = 0; y < height; ++y) {
+      for (uint32 x = 0; x < width; ++x) {
+        uint8 rgb[4] = { 255, 255, 255, 255 };
+        rgb[r_pos] = ((x / 63) & 1) ? 224 : 32;
+        rgb[g_pos] = (x % 63 + y % 63) + 96;
+        rgb[b_pos] = ((y / 63) & 1) ? 224 : 32;
+        ms->Write(rgb, bytes, NULL, NULL);
+      }
+    }
+    return ms.release();
+  }
+
+  // Convert RGB to 420.
+  // A negative height inverts the image.
+  bool ConvertRgb(const talk_base::MemoryStream* ms,
+                  uint32 fourcc, int32 width, int32 height,
+                  WebRtcVideoFrame* frame) {
+    int r_pos, g_pos, b_pos, bytes;
+    if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) {
+      return false;
+    }
+    int stride = width * bytes;
+    const uint8* start = reinterpret_cast<const uint8*>(ms->GetBuffer());
+    if (height < 0) {
+      height = -height;
+      start = start + stride * (height - 1);
+      stride = -stride;
+    }
+    frame->InitToBlack(width, height, 0, 0);
+    for (int32 y = 0; y < height; y += 2) {
+      for (int32 x = 0; x < width; x += 2) {
+        const uint8* rgb[4];
+        uint8 yuv[4][3];
+        rgb[0] = start + y * stride + x * bytes;
+        rgb[1] = rgb[0] + bytes;
+        rgb[2] = rgb[0] + stride;
+        rgb[3] = rgb[2] + bytes;
+        for (size_t i = 0; i < 4; ++i) {
+          ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos],
+                          &yuv[i][0], &yuv[i][1], &yuv[i][2]);
+        }
+        frame->GetYPlane()[width * y + x] = yuv[0][0];
+        frame->GetYPlane()[width * y + x + 1] = yuv[1][0];
+        frame->GetYPlane()[width * (y + 1) + x] = yuv[2][0];
+        frame->GetYPlane()[width * (y + 1) + x + 1] = yuv[3][0];
+        frame->GetUPlane()[width / 2 * (y / 2) + x / 2] =
+            (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4;
+        frame->GetVPlane()[width / 2 * (y / 2) + x / 2] =
+            (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4;
+      }
+    }
+    return true;
+  }
+
+  // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia.
+  void ConvertRgbPixel(uint8 r, uint8 g, uint8 b,
+                       uint8* y, uint8* u, uint8* v) {
+    *y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16;
+    *u = static_cast<int>(-.148 * r - .291 * g + .439 * b) + 128;
+    *v = static_cast<int>(.439 * r - .368 * g - .071 * b) + 128;
+  }
+
+  bool GetRgbPacking(uint32 fourcc,
+                     int* r_pos, int* g_pos, int* b_pos, int* bytes) {
+    if (fourcc == cricket::FOURCC_RAW) {
+      *r_pos = 0;
+      *g_pos = 1;
+      *b_pos = 2;
+      *bytes = 3;  // RGB in memory
+    } else if (fourcc == cricket::FOURCC_24BG) {
+      *r_pos = 2;
+      *g_pos = 1;
+      *b_pos = 0;
+      *bytes = 3;  // BGR in memory
+    } else if (fourcc == cricket::FOURCC_ABGR) {
+      *r_pos = 0;
+      *g_pos = 1;
+      *b_pos = 2;
+      *bytes = 4;  // RGBA in memory
+    } else if (fourcc == cricket::FOURCC_BGRA) {
+      *r_pos = 1;
+      *g_pos = 2;
+      *b_pos = 3;
+      *bytes = 4;  // ARGB in memory
+    } else if (fourcc == cricket::FOURCC_ARGB) {
+      *r_pos = 2;
+      *g_pos = 1;
+      *b_pos = 0;
+      *bytes = 4;  // BGRA in memory
+    } else {
+      return false;
+    }
+    return true;
+  }
+
+  // Comparison functions for testing.
+  static bool IsNull(const WebRtcVideoFrame& frame) {
+    return !frame.HasImage();
+  }
+
+  static bool IsSize(const WebRtcVideoFrame& frame,
+                     uint32 width, uint32 height) {
+    return frame.HasImage() &&
+        frame.GetYPitch() >= static_cast<int32>(width) &&
+        frame.GetUPitch() >= static_cast<int32>(width) / 2 &&
+        frame.GetVPitch() >= static_cast<int32>(width) / 2 &&
+        frame.GetWidth() == width && frame.GetHeight() == height;
+  }
+
+  static bool IsPlaneEqual(const std::string& name,
+                           const uint8* plane1, uint32 pitch1,
+                           const uint8* plane2, uint32 pitch2,
+                           uint32 width, uint32 height,
+                           int max_error) {
+    const uint8* r1 = plane1;
+    const uint8* r2 = plane2;
+    for (uint32 y = 0; y < height; ++y) {
+      for (uint32 x = 0; x < width; ++x) {
+        if (abs(static_cast<int>(r1[x] - r2[x])) > max_error) {
+          LOG(LS_INFO) << "IsPlaneEqual(" << name << "): pixel["
+                       << x << "," << y << "] differs: "
+                       << static_cast<int>(r1[x]) << " vs "
+                       << static_cast<int>(r2[x]);
+          return false;
+        }
+      }
+      r1 += pitch1;
+      r2 += pitch2;
+    }
+    return true;
+  }
+
+  static bool IsFrameContiguous(const WebRtcVideoFrame& frame) {
+    int width = frame.GetWidth();
+    int height = frame.GetHeight();
+    const uint8* y = frame.GetYPlane();
+    const uint8* u = frame.GetUPlane();
+    const uint8* v = frame.GetVPlane();
+    int size = width * height * 3 / 2;
+    bool u_near = (u - y) < size;
+    bool v_near = (v - y) < size;
+    return u_near && v_near;
+  }
+
+  static bool IsEqual(const WebRtcVideoFrame& frame,
+                      size_t width, size_t height,
+                      size_t pixel_width, size_t pixel_height,
+                      int64 elapsed_time, int64 time_stamp,
+                      const uint8* y, uint32 ypitch,
+                      const uint8* u, uint32 upitch,
+                      const uint8* v, uint32 vpitch,
+                      int max_error) {
+    if (!IsFrameContiguous(frame)) {
+      LOG(LS_INFO) << "lmi frame is not contiguous";
+    }
+    return IsSize(frame, width, height) &&
+        frame.GetPixelWidth() == pixel_width &&
+        frame.GetPixelHeight() == pixel_height &&
+        frame.GetElapsedTime() == elapsed_time &&
+        frame.GetTimeStamp() == time_stamp &&
+        IsPlaneEqual("y", frame.GetYPlane(), frame.GetYPitch(), y, ypitch,
+                     width, height, max_error) &&
+        IsPlaneEqual("u", frame.GetUPlane(), frame.GetUPitch(), u, upitch,
+                     width / 2, height / 2, max_error) &&
+        IsPlaneEqual("v", frame.GetVPlane(), frame.GetVPitch(), v, vpitch,
+                     width / 2, height / 2, max_error);
+  }
+
+  static bool IsEqual(const WebRtcVideoFrame& frame1,
+                      const WebRtcVideoFrame& frame2,
+                      int max_error) {
+    return IsEqual(frame1, frame2.GetWidth(), frame2.GetHeight(),
+                frame2.GetPixelWidth(), frame2.GetPixelHeight(),
+                frame2.GetElapsedTime(), frame2.GetTimeStamp(),
+                frame2.GetYPlane(), frame2.GetYPitch(),
+                frame2.GetUPlane(), frame2.GetUPitch(),
+                frame2.GetVPlane(), frame2.GetVPitch(),
+                max_error);
+  }
+
+ protected:
+  int repeat_;
+};
+
+// Converts a loaded I420 test frame to an ARGB buffer and verifies the
+// reported output size.  The loop runs repeat_ times so the same test can be
+// used for benchmarking.
+TEST_F(WebRtcVideoFrameTest, ConvertToARGBBuffer) {
+  size_t out_size = kWidth * kHeight * 4;  // 4 bytes per ARGB pixel.
+  talk_base::scoped_array<uint8> outbuf(new uint8[out_size + kAlignment]);
+  uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+  WebRtcVideoFrame frame;
+  ASSERT_TRUE(LoadFrame(kImageFilename, FOURCC_I420, kWidth, kHeight,
+                        &frame, ROTATION_0));
+
+  // TODO: Add test to convert these back to I420, to ensure the
+  // conversion is done correctly.
+  for (int i = 0; i < repeat_; ++i) {
+    EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB,
+                                                 out,
+                                                 out_size, kWidth * 4));
+  }
+}
+
+// Test basic construction of an image from an I420 buffer.
+TEST_F(WebRtcVideoFrameTest, InitI420) {
+  WebRtcVideoFrame frame;
+  EXPECT_TRUE(IsNull(frame));  // A default-constructed frame has no data.
+  talk_base::scoped_ptr<talk_base::MemoryStream> ms(LoadSample(kImageFilename));
+  ASSERT_TRUE(ms.get() != NULL);
+  size_t data_size;
+  ASSERT_TRUE(ms->GetSize(&data_size));
+  uint8* buf = reinterpret_cast<uint8*>(ms->GetBuffer());
+  EXPECT_TRUE(LoadFrame(buf, data_size, FOURCC_I420,
+                        kWidth, kHeight, &frame, ROTATION_0));
+
+  // I420 layout: full-size Y plane, then U and V planes at quarter size.
+  const uint8* y = reinterpret_cast<uint8*>(ms->GetBuffer());
+  const uint8* u = y + kWidth * kHeight;
+  const uint8* v = u + kWidth * kHeight / 4;
+  EXPECT_TRUE(IsEqual(frame, kWidth, kHeight, 1, 1, 0, 0,
+                      y, kWidth, u, kWidth / 2, v, kWidth / 2, 0));
+}
+
+// Test constructing an image from a synthesized I420 buffer.
+TEST_F(WebRtcVideoFrameTest, ConstructI420) {
+  WebRtcVideoFrame frame;
+  talk_base::scoped_ptr<talk_base::MemoryStream> ms(
+      CreateYuv420Sample(kWidth, kHeight));
+  EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420,
+                        kWidth, kHeight, &frame, ROTATION_0));
+
+  // I420 layout: full-size Y plane, then U and V planes at quarter size.
+  const uint8* y = reinterpret_cast<uint8*>(ms.get()->GetBuffer());
+  const uint8* u = y + kWidth * kHeight;
+  const uint8* v = u + kWidth * kHeight / 4;
+  EXPECT_TRUE(IsEqual(frame, kWidth, kHeight, 1, 1, 0, 0,
+                      y, kWidth, u, kWidth / 2, v, kWidth / 2, 0));
+}
+
+// Test creating an empty image and initing it to black.
+TEST_F(WebRtcVideoFrameTest, ConstructBlack) {
+  WebRtcVideoFrame frame;
+  for (int i = 0; i < repeat_; ++i) {
+    EXPECT_TRUE(frame.InitToBlack(kWidth, kHeight, 0, 0));
+  }
+  EXPECT_TRUE(IsSize(frame, kWidth, kHeight));
+  // Black in YUV is Y=16 with neutral chroma (U=V=128).
+  EXPECT_EQ(16, *frame.GetYPlane());
+  EXPECT_EQ(128, *frame.GetUPlane());
+  EXPECT_EQ(128, *frame.GetVPlane());
+}
+
+// Test that Copy() produces a pixel-exact duplicate of the source frame.
+TEST_F(WebRtcVideoFrameTest, Copy) {
+  WebRtcVideoFrame frame1;
+  talk_base::scoped_ptr<cricket::WebRtcVideoFrame> frame2;
+  ASSERT_TRUE(LoadFrame(kImageFilename, FOURCC_I420, kWidth, kHeight,
+                        &frame1, ROTATION_0));
+  frame2.reset(static_cast<WebRtcVideoFrame*>(frame1.Copy()));
+  EXPECT_TRUE(IsEqual(frame1, *frame2.get(), 0));
+}
+
+// Test that CopyToBuffer() writes back the exact I420 bytes the frame was
+// loaded from.
+TEST_F(WebRtcVideoFrameTest, CopyToBuffer) {
+  size_t out_size = kWidth * kHeight * 3 / 2;  // I420: 1.5 bytes per pixel.
+  talk_base::scoped_array<uint8> out(new uint8[out_size]);
+  WebRtcVideoFrame frame;
+  talk_base::scoped_ptr<talk_base::MemoryStream> ms(LoadSample(kImageFilename));
+  ASSERT_TRUE(ms.get() != NULL);
+  size_t data_size;
+  ASSERT_TRUE(ms->GetSize(&data_size));
+  EXPECT_TRUE(LoadFrame(reinterpret_cast<uint8*>(ms->GetBuffer()),
+                        data_size, FOURCC_I420,
+                        kWidth, kHeight, &frame, ROTATION_0));
+  for (int i = 0; i < repeat_; ++i) {
+    EXPECT_EQ(out_size, frame.CopyToBuffer(out.get(), out_size));
+  }
+  EXPECT_EQ(0, memcmp(out.get(), ms->GetBuffer(), out_size));
+}
+
+// Degenerate 1x1 frame: CopyToBuffer() must emit exactly 3 bytes (Y, U, V)
+// and not write past the end of the destination buffer.
+TEST_F(WebRtcVideoFrameTest, CopyToBuffer1Pixel) {
+  size_t out_size = 3;
+  talk_base::scoped_array<uint8> out(new uint8[out_size + 1]);
+  memset(out.get(), 0xfb, out_size + 1);  // Fill buffer
+  uint8 pixel[3] = { 1, 2, 3 };
+  WebRtcVideoFrame frame;
+  EXPECT_TRUE(LoadFrame(pixel, sizeof(pixel), FOURCC_I420,
+                        1, 1, &frame, ROTATION_0));
+  for (int i = 0; i < repeat_; ++i) {
+    EXPECT_EQ(out_size, frame.CopyToBuffer(out.get(), out_size));
+  }
+  EXPECT_EQ(1, out.get()[0]);  // Check Y.  Should be 1.
+  EXPECT_EQ(2, out.get()[1]);  // Check U.  Should be 2.
+  EXPECT_EQ(3, out.get()[2]);  // Check V.  Should be 3.
+  EXPECT_EQ(0xfb, out.get()[3]);  // Check sentinel is still intact.
+}
+
+// TODO: Merge this with the LmiVideoFrame test for more test cases
+// when they are supported.
diff --git a/talk/session/phone/webrtcvie.h b/talk/session/phone/webrtcvie.h
new file mode 100644
index 0000000..d6ef007
--- /dev/null
+++ b/talk/session/phone/webrtcvie.h
@@ -0,0 +1,144 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef TALK_SESSION_PHONE_WEBRTCVIE_H_
+#define TALK_SESSION_PHONE_WEBRTCVIE_H_
+
+#include "talk/base/common.h"
+#include "talk/session/phone/webrtccommon.h"
+
+#ifdef WEBRTC_RELATIVE_PATH
+#include "common_types.h"
+#include "modules/interface/module_common_types.h"
+#include "modules/video_capture/main/interface/video_capture.h"
+#include "modules/video_render/main/interface/video_render.h"
+#include "video_engine/main/interface/vie_base.h"
+#include "video_engine/main/interface/vie_capture.h"
+#include "video_engine/main/interface/vie_codec.h"
+#include "video_engine/main/interface/vie_errors.h"
+#include "video_engine/main/interface/vie_image_process.h"
+#include "video_engine/main/interface/vie_network.h"
+#include "video_engine/main/interface/vie_render.h"
+#include "video_engine/main/interface/vie_rtp_rtcp.h"
+#else
+#include "third_party/webrtc/files/include/common_types.h"
+#include "third_party/webrtc/files/include/module_common_types.h"
+#include "third_party/webrtc/files/include/video_capture.h"
+#include "third_party/webrtc/files/include/video_render.h"
+#include "third_party/webrtc/files/include/vie_base.h"
+#include "third_party/webrtc/files/include/vie_capture.h"
+#include "third_party/webrtc/files/include/vie_codec.h"
+#include "third_party/webrtc/files/include/vie_errors.h"
+#include "third_party/webrtc/files/include/vie_image_process.h"
+#include "third_party/webrtc/files/include/vie_network.h"
+#include "third_party/webrtc/files/include/vie_render.h"
+#include "third_party/webrtc/files/include/vie_rtp_rtcp.h"
+#endif  // WEBRTC_RELATIVE_PATH
+
+namespace cricket {
+
+// all tracing macros should go to a common file
+
+// RAII holder that automatically manages the lifetime of a
+// webrtc::VideoEngine: VideoEngine::Delete() is invoked on destruction.
+// Non-copyable by convention; do not share ownership of the raw pointer.
+class scoped_vie_engine {
+ public:
+  explicit scoped_vie_engine(webrtc::VideoEngine* e) : ptr(e) {}
+  // VERIFY, to ensure that there are no leaks at shutdown
+  ~scoped_vie_engine() {
+    if (ptr) {
+      webrtc::VideoEngine::Delete(ptr);
+    }
+  }
+  webrtc::VideoEngine* get() const { return ptr; }
+ private:
+  webrtc::VideoEngine* ptr;  // Owned engine instance; may be NULL.
+};
+
+// scoped_ptr-style holder for a VideoEngine sub-API interface pointer.
+// Obtains the interface via T::GetInterface() and calls Release() on
+// destruction, balancing the engine's internal reference count.
+template<class T> class scoped_vie_ptr {
+ public:
+  // Acquires the T interface from the wrapped engine.
+  explicit scoped_vie_ptr(const scoped_vie_engine& e)
+       : ptr(T::GetInterface(e.get())) {}
+  // Adopts an already-acquired interface pointer (used to inject fakes).
+  explicit scoped_vie_ptr(T* p) : ptr(p) {}
+  ~scoped_vie_ptr() { if (ptr) ptr->Release(); }
+  T* operator->() const { return ptr; }
+  T* get() const { return ptr; }
+ private:
+  T* ptr;
+};
+
+// Utility class for aggregating the various WebRTC VideoEngine interfaces.
+// The default constructor creates a real engine and acquires each sub-API
+// from it; the second constructor takes pre-built interface pointers so
+// fake implementations can be injected for testing (engine_ stays NULL).
+class ViEWrapper {
+ public:
+  ViEWrapper()
+      : engine_(webrtc::VideoEngine::Create()),
+        base_(engine_), codec_(engine_), capture_(engine_),
+        network_(engine_), render_(engine_), rtp_(engine_),
+        image_(engine_) {
+  }
+
+  // Takes ownership of the given interface pointers (released on
+  // destruction).  Intended for unit tests.
+  ViEWrapper(webrtc::ViEBase* base, webrtc::ViECodec* codec,
+             webrtc::ViECapture* capture, webrtc::ViENetwork* network,
+             webrtc::ViERender* render, webrtc::ViERTP_RTCP* rtp,
+             webrtc::ViEImageProcess* image)
+      : engine_(NULL),
+        base_(base),
+        codec_(codec),
+        capture_(capture),
+        network_(network),
+        render_(render),
+        rtp_(rtp),
+        image_(image) {
+  }
+
+  virtual ~ViEWrapper() {}
+  webrtc::VideoEngine* engine() { return engine_.get(); }
+  webrtc::ViEBase* base() { return base_.get(); }
+  webrtc::ViECodec* codec() { return codec_.get(); }
+  webrtc::ViECapture* capture() { return capture_.get(); }
+  webrtc::ViENetwork* network() { return network_.get(); }
+  webrtc::ViERender* render() { return render_.get(); }
+  webrtc::ViERTP_RTCP* rtp() { return rtp_.get(); }
+  // NOTE(review): accessor is named sync() but returns the ViEImageProcess
+  // interface — consider renaming to image() for clarity.
+  webrtc::ViEImageProcess* sync() { return image_.get(); }
+  // Last error reported by the underlying engine.
+  int error() { return base_->LastError(); }
+
+ private:
+  scoped_vie_engine engine_;
+  scoped_vie_ptr<webrtc::ViEBase> base_;
+  scoped_vie_ptr<webrtc::ViECodec> codec_;
+  scoped_vie_ptr<webrtc::ViECapture> capture_;
+  scoped_vie_ptr<webrtc::ViENetwork> network_;
+  scoped_vie_ptr<webrtc::ViERender> render_;
+  scoped_vie_ptr<webrtc::ViERTP_RTCP> rtp_;
+  scoped_vie_ptr<webrtc::ViEImageProcess> image_;
+};
+}
+
+#endif  // TALK_SESSION_PHONE_WEBRTCVIE_H_
diff --git a/talk/session/phone/webrtcvoe.h b/talk/session/phone/webrtcvoe.h
new file mode 100644
index 0000000..398928e
--- /dev/null
+++ b/talk/session/phone/webrtcvoe.h
@@ -0,0 +1,196 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef TALK_SESSION_PHONE_WEBRTCVOE_H_
+#define TALK_SESSION_PHONE_WEBRTCVOE_H_
+
+#include "talk/base/common.h"
+#include "talk/session/phone/webrtccommon.h"
+
+#ifdef WEBRTC_RELATIVE_PATH
+#include "common_types.h"
+#include "modules/audio_device/main/interface/audio_device.h"
+#include "voice_engine/main/interface/voe_audio_processing.h"
+#include "voice_engine/main/interface/voe_base.h"
+#include "voice_engine/main/interface/voe_codec.h"
+#include "voice_engine/main/interface/voe_dtmf.h"
+#include "voice_engine/main/interface/voe_errors.h"
+#include "voice_engine/main/interface/voe_external_media.h"
+#include "voice_engine/main/interface/voe_file.h"
+#include "voice_engine/main/interface/voe_hardware.h"
+#include "voice_engine/main/interface/voe_neteq_stats.h"
+#include "voice_engine/main/interface/voe_network.h"
+#include "voice_engine/main/interface/voe_rtp_rtcp.h"
+#include "voice_engine/main/interface/voe_video_sync.h"
+#include "voice_engine/main/interface/voe_volume_control.h"
+#else
+#include "third_party/webrtc/files/include/audio_device.h"
+#include "third_party/webrtc/files/include/common_types.h"
+#include "third_party/webrtc/files/include/voe_audio_processing.h"
+#include "third_party/webrtc/files/include/voe_base.h"
+#include "third_party/webrtc/files/include/voe_codec.h"
+#include "third_party/webrtc/files/include/voe_dtmf.h"
+#include "third_party/webrtc/files/include/voe_errors.h"
+#include "third_party/webrtc/files/include/voe_external_media.h"
+#include "third_party/webrtc/files/include/voe_file.h"
+#include "third_party/webrtc/files/include/voe_hardware.h"
+#include "third_party/webrtc/files/include/voe_neteq_stats.h"
+#include "third_party/webrtc/files/include/voe_network.h"
+#include "third_party/webrtc/files/include/voe_rtp_rtcp.h"
+#include "third_party/webrtc/files/include/voe_video_sync.h"
+#include "third_party/webrtc/files/include/voe_volume_control.h"
+#endif  // WEBRTC_RELATIVE_PATH
+
+namespace cricket {
+// RAII holder that automatically manages the lifetime of a
+// webrtc::VoiceEngine: VoiceEngine::Delete() is invoked on destruction.
+class scoped_voe_engine {
+ public:
+  explicit scoped_voe_engine(webrtc::VoiceEngine* e) : ptr(e) {}
+  // VERIFY, to ensure that there are no leaks at shutdown
+  ~scoped_voe_engine() { if (ptr) VERIFY(webrtc::VoiceEngine::Delete(ptr)); }
+  // Releases the current pointer.
+  void reset() {
+    if (ptr) {
+      VERIFY(webrtc::VoiceEngine::Delete(ptr));
+      ptr = NULL;
+    }
+  }
+  webrtc::VoiceEngine* get() const { return ptr; }
+ private:
+  webrtc::VoiceEngine* ptr;  // Owned engine instance; may be NULL.
+};
+
+// scoped_ptr-style holder for a VoiceEngine sub-API interface pointer.
+// Obtains the interface via T::GetInterface() and calls Release() on
+// destruction (or via reset()), balancing the engine's reference count.
+template<class T>
+class scoped_voe_ptr {
+ public:
+  // Acquires the T interface from the wrapped engine.
+  explicit scoped_voe_ptr(const scoped_voe_engine& e)
+      : ptr(T::GetInterface(e.get())) {}
+  // Adopts an already-acquired interface pointer (used to inject fakes).
+  explicit scoped_voe_ptr(T* p) : ptr(p) {}
+  ~scoped_voe_ptr() { if (ptr) ptr->Release(); }
+  T* operator->() const { return ptr; }
+  T* get() const { return ptr; }
+
+  // Releases the current pointer.
+  void reset() {
+    if (ptr) {
+      ptr->Release();
+      ptr = NULL;
+    }
+  }
+
+ private:
+  T* ptr;
+};
+
+// Utility class for aggregating the various WebRTC VoiceEngine interfaces.
+// The default constructor creates a real engine and acquires each sub-API
+// from it; the second constructor takes pre-built interface pointers so
+// fake implementations can be injected for testing (engine_ stays NULL).
+class VoEWrapper {
+ public:
+  VoEWrapper()
+      : engine_(webrtc::VoiceEngine::Create()), processing_(engine_),
+        base_(engine_), codec_(engine_), dtmf_(engine_), file_(engine_),
+        hw_(engine_), media_(engine_), neteq_(engine_), network_(engine_),
+        rtp_(engine_), sync_(engine_), volume_(engine_) {
+  }
+  // Takes ownership of the given interface pointers (released on
+  // destruction).  Intended for unit tests.
+  VoEWrapper(webrtc::VoEAudioProcessing* processing,
+             webrtc::VoEBase* base,
+             webrtc::VoECodec* codec,
+             webrtc::VoEDtmf* dtmf,
+             webrtc::VoEFile* file,
+             webrtc::VoEHardware* hw,
+             webrtc::VoEExternalMedia* media,
+             webrtc::VoENetEqStats* neteq,
+             webrtc::VoENetwork* network,
+             webrtc::VoERTP_RTCP* rtp,
+             webrtc::VoEVideoSync* sync,
+             webrtc::VoEVolumeControl* volume)
+      : engine_(NULL),
+        processing_(processing),
+        base_(base),
+        codec_(codec),
+        dtmf_(dtmf),
+        file_(file),
+        hw_(hw),
+        media_(media),
+        neteq_(neteq),
+        network_(network),
+        rtp_(rtp),
+        sync_(sync),
+        volume_(volume) {
+  }
+  ~VoEWrapper() {}
+  webrtc::VoiceEngine* engine() const { return engine_.get(); }
+  webrtc::VoEAudioProcessing* processing() const { return processing_.get(); }
+  webrtc::VoEBase* base() const { return base_.get(); }
+  webrtc::VoECodec* codec() const { return codec_.get(); }
+  webrtc::VoEDtmf* dtmf() const { return dtmf_.get(); }
+  webrtc::VoEFile* file() const { return file_.get(); }
+  webrtc::VoEHardware* hw() const { return hw_.get(); }
+  webrtc::VoEExternalMedia* media() const { return media_.get(); }
+  webrtc::VoENetEqStats* neteq() const { return neteq_.get(); }
+  webrtc::VoENetwork* network() const { return network_.get(); }
+  webrtc::VoERTP_RTCP* rtp() const { return rtp_.get(); }
+  webrtc::VoEVideoSync* sync() const { return sync_.get(); }
+  webrtc::VoEVolumeControl* volume() const { return volume_.get(); }
+  // Last error reported by the underlying engine.
+  int error() { return base_->LastError(); }
+
+ private:
+  scoped_voe_engine engine_;
+  scoped_voe_ptr<webrtc::VoEAudioProcessing> processing_;
+  scoped_voe_ptr<webrtc::VoEBase> base_;
+  scoped_voe_ptr<webrtc::VoECodec> codec_;
+  scoped_voe_ptr<webrtc::VoEDtmf> dtmf_;
+  scoped_voe_ptr<webrtc::VoEFile> file_;
+  scoped_voe_ptr<webrtc::VoEHardware> hw_;
+  scoped_voe_ptr<webrtc::VoEExternalMedia> media_;
+  scoped_voe_ptr<webrtc::VoENetEqStats> neteq_;
+  scoped_voe_ptr<webrtc::VoENetwork> network_;
+  scoped_voe_ptr<webrtc::VoERTP_RTCP> rtp_;
+  scoped_voe_ptr<webrtc::VoEVideoSync> sync_;
+  scoped_voe_ptr<webrtc::VoEVolumeControl> volume_;
+};
+
+// Adds indirection to static WebRtc tracing functions, allowing them to be
+// mocked in unit tests.  Each method forwards directly to the corresponding
+// static on webrtc::VoiceEngine and returns its result code.
+class VoETraceWrapper {
+ public:
+  virtual ~VoETraceWrapper() {}
+
+  virtual int SetTraceFilter(const unsigned int filter) {
+    return webrtc::VoiceEngine::SetTraceFilter(filter);
+  }
+  virtual int SetTraceFile(const char* fileNameUTF8) {
+    return webrtc::VoiceEngine::SetTraceFile(fileNameUTF8);
+  }
+  virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
+    return webrtc::VoiceEngine::SetTraceCallback(callback);
+  }
+};
+}
+
+#endif  // TALK_SESSION_PHONE_WEBRTCVOE_H_
diff --git a/talk/session/phone/webrtcvoiceengine.cc b/talk/session/phone/webrtcvoiceengine.cc
new file mode 100644
index 0000000..a53dfc1
--- /dev/null
+++ b/talk/session/phone/webrtcvoiceengine.cc
@@ -0,0 +1,1988 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifdef HAVE_WEBRTC_VOICE
+
+#include "talk/session/phone/webrtcvoiceengine.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <string>
+#include <vector>
+
+#include "talk/base/base64.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/common.h"
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+#include "talk/session/phone/webrtcvoe.h"
+
+#ifdef WIN32
+#include <objbase.h>  // NOLINT
+#endif
+
+namespace cricket {
+
+// For Linux/Mac, using the default device is done by specifying index 0 for
+// VoE 4.0 and not -1 (which was the case for VoE 3.5).
+//
+// On Windows Vista and newer, Microsoft introduced the concept of "Default
+// Communications Device". This means that there are two types of default
+// devices (old Wave Audio style default and Default Communications Device).
+//
+// On Windows systems which only support Wave Audio style default, uses either
+// -1 or 0 to select the default device.
+//
+// On Windows systems which support both "Default Communication Device" and
+// old Wave Audio style default, use -1 for Default Communications Device and
+// -2 for Wave Audio style default, which is what we want to use for clips.
+// It's not clear yet whether the -2 index is handled properly on other OSes.
+
+#ifdef WIN32
+static const int kDefaultAudioDeviceId = -1;
+static const int kDefaultSoundclipDeviceId = -2;
+#else
+static const int kDefaultAudioDeviceId = 0;
+#endif
+
+// extension header for audio levels, as defined in
+// http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-03
+static const char kRtpAudioLevelHeaderExtension[] =
+    "urn:ietf:params:rtp-hdrext:ssrc-audio-level";
+
+// Splits |text| on CR/LF boundaries and logs each line at severity |sev|.
+// Note: uses strtok(), so |text| is modified in place and the helper is not
+// thread-safe; callers must pass a writable, throwaway buffer.
+static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
+  const char* delim = "\r\n";
+  for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
+    LOG_V(sev) << tok;
+  }
+}
+
+static const char kL16CodecName[] = "L16";
+
+// WebRtcVoiceEngine
+// Codec preference table: (name, clock rate) pairs ordered from most to
+// least preferred.  Engine codecs not listed here are not offered (see
+// GetCodecPreference / Construct).
+const WebRtcVoiceEngine::CodecPref WebRtcVoiceEngine::kCodecPrefs[] = {
+  { "ISAC",   16000 },
+  { "ISAC",   32000 },
+  { "speex",  16000 },
+  { "G722",   16000 },
+  { "iLBC",   8000 },
+  { "speex",  8000 },
+  { "PCMU",   8000 },
+  { "PCMA",   8000 },
+  { "CN",     32000 },
+  { "CN",     16000 },
+  { "CN",     8000 },
+  { "red",    8000 },
+  { "telephone-event", 8000 },
+};
+
+// Plays sound clips through the engine's dedicated soundclip VoiceEngine
+// instance (engine_->voe_sc()).  Owns one engine channel, created in Init()
+// and deleted in the destructor.  Registers itself with the engine for its
+// lifetime so device changes can be propagated.
+class WebRtcSoundclipMedia : public SoundclipMedia {
+ public:
+  explicit WebRtcSoundclipMedia(WebRtcVoiceEngine *engine)
+      : engine_(engine), webrtc_channel_(-1) {
+    engine_->RegisterSoundclip(this);
+  }
+
+  virtual ~WebRtcSoundclipMedia() {
+    engine_->UnregisterSoundclip(this);
+    if (webrtc_channel_ != -1) {
+      if (engine_->voe_sc()->base()->DeleteChannel(webrtc_channel_)
+          == -1) {
+        LOG_RTCERR1(DeleteChannel, webrtc_channel_);
+      }
+    }
+  }
+
+  // Creates the underlying engine channel.  Must be called (and succeed)
+  // before Enable()/PlaySound().  Returns false on engine error.
+  bool Init() {
+    webrtc_channel_ = engine_->voe_sc()->base()->CreateChannel();
+    if (webrtc_channel_ == -1) {
+      LOG_RTCERR0(CreateChannel);
+      return false;
+    }
+    return true;
+  }
+
+  // Starts playout on the soundclip channel.
+  bool Enable() {
+    if (engine_->voe_sc()->base()->StartPlayout(webrtc_channel_) == -1) {
+      LOG_RTCERR1(StartPlayout, webrtc_channel_);
+      return false;
+    }
+    return true;
+  }
+
+  // Stops playout on the soundclip channel.
+  bool Disable() {
+    if (engine_->voe_sc()->base()->StopPlayout(webrtc_channel_) == -1) {
+      LOG_RTCERR1(StopPlayout, webrtc_channel_);
+      return false;
+    }
+    return true;
+  }
+
+  // Plays |len| bytes of sound data from |buf| (looping if SF_LOOP is set
+  // in |flags|), replacing any clip currently playing.  A NULL |buf| just
+  // stops the current clip.  The data is copied into an internal stream.
+  virtual bool PlaySound(const char *buf, int len, int flags) {
+    // Must stop playing the current sound (if any), because we are about to
+    // modify the stream.
+    if (engine_->voe_sc()->file()->StopPlayingFileLocally(webrtc_channel_)
+        == -1) {
+      LOG_RTCERR1(StopPlayingFileLocally, webrtc_channel_);
+      return false;
+    }
+
+    if (buf) {
+      stream_.reset(new WebRtcSoundclipStream(buf, len));
+      stream_->set_loop((flags & SF_LOOP) != 0);
+      stream_->Rewind();
+
+      // Play it.
+      if (engine_->voe_sc()->file()->StartPlayingFileLocally(
+          webrtc_channel_, stream_.get()) == -1) {
+        LOG_RTCERR2(StartPlayingFileLocally, webrtc_channel_, stream_.get());
+        LOG(LS_ERROR) << "Unable to start soundclip";
+        return false;
+      }
+    } else {
+      stream_.reset();
+    }
+    return true;
+  }
+
+  int GetLastEngineError() const { return engine_->voe_sc()->error(); }
+
+ private:
+  WebRtcVoiceEngine *engine_;  // Not owned; outlives this object.
+  int webrtc_channel_;         // Engine channel id, or -1 if not created.
+  talk_base::scoped_ptr<WebRtcSoundclipStream> stream_;
+};
+
+// Default constructor: creates real VoE wrappers and tracing.  The audio
+// device modules are left NULL so the engines use the platform defaults.
+WebRtcVoiceEngine::WebRtcVoiceEngine()
+    : voe_wrapper_(new VoEWrapper()),
+      voe_wrapper_sc_(new VoEWrapper()),
+      tracing_(new VoETraceWrapper()),
+      adm_(NULL),
+      adm_sc_(NULL),
+      log_level_(kDefaultLogSeverity),
+      is_dumping_aec_(false),
+      desired_local_monitor_enable_(false) {
+  Construct();
+}
+
+// Constructor with injected audio device modules for the call engine
+// (|adm|) and soundclip engine (|adm_sc|); both are registered during
+// InitInternal() and destroyed in the destructor.
+WebRtcVoiceEngine::WebRtcVoiceEngine(webrtc::AudioDeviceModule* adm,
+                                     webrtc::AudioDeviceModule* adm_sc)
+    : voe_wrapper_(new VoEWrapper()),
+      voe_wrapper_sc_(new VoEWrapper()),
+      tracing_(new VoETraceWrapper()),
+      adm_(adm),
+      adm_sc_(adm_sc),
+      log_level_(kDefaultLogSeverity),
+      is_dumping_aec_(false),
+      desired_local_monitor_enable_(false) {
+  Construct();
+}
+
+// Constructor with injected wrappers, used by unit tests to substitute
+// fake VoE interfaces and tracing.  Takes ownership of all three.
+WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
+                                     VoEWrapper* voe_wrapper_sc,
+                                     VoETraceWrapper* tracing)
+    : voe_wrapper_(voe_wrapper),
+      voe_wrapper_sc_(voe_wrapper_sc),
+      tracing_(tracing),
+      adm_(NULL),
+      adm_sc_(NULL),
+      log_level_(kDefaultLogSeverity),
+      is_dumping_aec_(false),
+      desired_local_monitor_enable_(false) {
+  Construct();
+}
+
+// Shared initialization run by every constructor: hooks up tracing and the
+// engine observer, zeroes the default AGC config, and builds the supported
+// codec list from the engine, sorted by local preference.
+void WebRtcVoiceEngine::Construct() {
+  initialized_ = false;
+  LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
+  ApplyLogging("");
+  if (tracing_->SetTraceCallback(this) == -1) {
+    LOG_RTCERR0(SetTraceCallback);
+  }
+  if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
+    LOG_RTCERR0(RegisterVoiceEngineObserver);
+  }
+  // Clear the default agc state.
+  memset(&default_agc_config_, 0, sizeof(default_agc_config_));
+
+  // Load our audio codec list
+  LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
+  int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
+  for (int i = 0; i < ncodecs; ++i) {
+    webrtc::CodecInst gcodec;
+    if (voe_wrapper_->codec()->GetCodec(i, gcodec) >= 0) {
+      // Skip the codecs that we don't support.
+      if (strcmp(gcodec.plname, kL16CodecName) == 0) {
+        continue;
+      }
+      // Only offer codecs present in the kCodecPrefs table.
+      int pref = GetCodecPreference(gcodec.plname, gcodec.plfreq);
+      if (pref != -1) {
+        if (gcodec.rate == -1) gcodec.rate = 0;
+        AudioCodec codec(gcodec.pltype, gcodec.plname, gcodec.plfreq,
+                         gcodec.rate, gcodec.channels, pref);
+        LOG(LS_INFO) << gcodec.plname << "/" << gcodec.plfreq << "/" \
+                     << gcodec.channels << " " << gcodec.pltype;
+        codecs_.push_back(codec);
+      }
+    }
+  }
+  // Make sure they are in local preference order
+  std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
+}
+
+// Destructor: unhooks the engine observer, then destroys injected audio
+// device modules.  The VoE wrappers are reset *before* each ADM is
+// destroyed so the engine releases the module first.
+WebRtcVoiceEngine::~WebRtcVoiceEngine() {
+  LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine";
+  if (voe_wrapper_->base()->DeRegisterVoiceEngineObserver() == -1) {
+    LOG_RTCERR0(DeRegisterVoiceEngineObserver);
+  }
+  if (adm_) {
+    voe_wrapper_.reset();
+    webrtc::AudioDeviceModule::Destroy(adm_);
+    adm_ = NULL;
+  }
+  if (adm_sc_) {
+    voe_wrapper_sc_.reset();
+    webrtc::AudioDeviceModule::Destroy(adm_sc_);
+    adm_sc_ = NULL;
+  }
+
+  // Stop receiving trace callbacks into this (about to be destroyed) object.
+  tracing_->SetTraceCallback(NULL);
+}
+
+// Public entry point for engine startup.  Wraps InitInternal() and tears
+// the engine back down via Terminate() if any step fails, so a failed Init
+// leaves no partially-initialized state behind.
+bool WebRtcVoiceEngine::Init() {
+  LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
+  bool res = InitInternal();
+  if (res) {
+    LOG(LS_INFO) << "WebRtcVoiceEngine::Init Done!";
+  } else {
+    LOG(LS_ERROR) << "WebRtcVoiceEngine::Init failed";
+    Terminate();
+  }
+  return res;
+}
+
+// Brings up both VoiceEngine instances (call + soundclip): registers any
+// injected audio device modules, inits the engines, enables AEC/AGC,
+// captures the engine's default AGC configuration, and on Windows selects
+// the soundclip playout device.  Returns false on the first engine error;
+// the caller (Init) runs Terminate() on failure.
+bool WebRtcVoiceEngine::InitInternal() {
+  // Temporarily turn logging level up for the Init call
+  int old_level = log_level_;
+  log_level_ = talk_base::_min(log_level_,
+                               static_cast<int>(talk_base::LS_INFO));
+  ApplyLogging("");
+
+  // Register injected audio device modules before Init so the engines use
+  // them instead of the platform default devices.
+  if (adm_) {
+    if (voe_wrapper_->base()->RegisterAudioDeviceModule(*adm_) == -1) {
+      LOG_RTCERR0_EX(Init, voe_wrapper_->error());
+      return false;
+    }
+  }
+  if (adm_sc_) {
+    if (voe_wrapper_sc_->base()->RegisterAudioDeviceModule(*adm_sc_) == -1) {
+      LOG_RTCERR0_EX(Init, voe_wrapper_sc_->error());
+      return false;
+    }
+  }
+
+  // Init WebRtc VoiceEngine, enabling AEC logging if specified in SetLogging.
+  if (voe_wrapper_->base()->Init() == -1) {
+    LOG_RTCERR0_EX(Init, voe_wrapper_->error());
+    return false;
+  }
+
+  // Restore the previous log level and apply the log filter.
+  log_level_ = old_level;
+  ApplyLogging(log_filter_);
+
+  // Log the VoiceEngine version info
+  char buffer[1024] = "";
+  voe_wrapper_->base()->GetVersion(buffer);
+  LOG(LS_INFO) << "WebRtc VoiceEngine Version:";
+  LogMultiline(talk_base::LS_INFO, buffer);
+
+  // Turn on AEC and AGC by default.
+  if (!SetOptions(
+      MediaEngineInterface::ECHO_CANCELLATION |
+      MediaEngineInterface::AUTO_GAIN_CONTROL)) {
+    return false;
+  }
+
+  // Save the default AGC configuration settings.  This must be a Get, not a
+  // Set: default_agc_config_ is zero-filled in Construct(), and pushing it
+  // into the engine would wipe out the engine's real defaults.
+  if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) {
+    LOG_RTCERR0(GetAGCConfig);
+    return false;
+  }
+
+  // Print our codec list again for the call diagnostic log
+  LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
+  for (std::vector<AudioCodec>::const_iterator it = codecs_.begin();
+      it != codecs_.end(); ++it) {
+    LOG(LS_INFO) << it->name << "/" << it->clockrate << "/"
+              << it->channels << " " << it->id;
+  }
+
+#if defined(LINUX) && !defined(HAVE_LIBPULSE)
+  voe_wrapper_sc_->hw()->SetAudioDeviceLayer(webrtc::kAudioLinuxAlsa);
+#endif
+
+  // Initialize the VoiceEngine instance that we'll use to play out sound clips.
+  if (voe_wrapper_sc_->base()->Init() == -1) {
+    LOG_RTCERR0_EX(Init, voe_wrapper_sc_->error());
+    return false;
+  }
+
+  // On Windows, tell it to use the default sound (not communication) devices.
+  // First check whether there is a valid sound device for playback.
+  // TODO: Clean this up when we support setting the soundclip device.
+#ifdef WIN32
+  int num_of_devices = 0;
+  if (voe_wrapper_sc_->hw()->GetNumOfPlayoutDevices(num_of_devices) != -1 &&
+      num_of_devices > 0) {
+    if (voe_wrapper_sc_->hw()->SetPlayoutDevice(kDefaultSoundclipDeviceId)
+        == -1) {
+      LOG_RTCERR1_EX(SetPlayoutDevice, kDefaultSoundclipDeviceId,
+                      voe_wrapper_sc_->error());
+      return false;
+    }
+  } else {
+    LOG(LS_WARNING) << "No valid sound playout device found.";
+  }
+#endif
+
+  initialized_ = true;
+  return true;
+}
+
+// Shuts down both VoiceEngine instances and stops any in-progress AEC dump.
+// Safe to call after a partial InitInternal() failure; resets initialized_
+// and the desired local monitor state.
+void WebRtcVoiceEngine::Terminate() {
+  LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate";
+  initialized_ = false;
+
+  // Stop AEC debug recording before tearing the engine down.
+  if (is_dumping_aec_) {
+    if (voe_wrapper_->processing()->StopDebugRecording() == -1) {
+      LOG_RTCERR0(StopDebugRecording);
+    }
+    is_dumping_aec_ = false;
+  }
+
+  voe_wrapper_sc_->base()->Terminate();
+  voe_wrapper_->base()->Terminate();
+  desired_local_monitor_enable_ = false;
+}
+
+// This engine supports both sending and receiving audio.
+int WebRtcVoiceEngine::GetCapabilities() {
+  return AUDIO_SEND | AUDIO_RECV;
+}
+
+// Creates a new voice media channel.  Returns NULL (and frees the partially
+// constructed channel) if channel creation inside the engine failed; the
+// caller owns the returned channel.
+VoiceMediaChannel *WebRtcVoiceEngine::CreateChannel() {
+  WebRtcVoiceMediaChannel* ch = new WebRtcVoiceMediaChannel(this);
+  if (!ch->valid()) {
+    delete ch;
+    ch = NULL;
+  }
+  return ch;
+}
+
+// Creates a soundclip player backed by the soundclip VoiceEngine instance.
+// Returns NULL (and frees the object) if channel creation or playout start
+// fails; the caller owns the returned soundclip.
+SoundclipMedia *WebRtcVoiceEngine::CreateSoundclip() {
+  WebRtcSoundclipMedia *soundclip = new WebRtcSoundclipMedia(this);
+  if (!soundclip->Init() || !soundclip->Enable()) {
+    delete soundclip;
+    return NULL;
+  }
+  return soundclip;
+}
+
+// Applies the given MediaEngineInterface option flags to the audio
+// processing pipeline.  AEC and AGC track |options|; noise suppression and
+// typing detection are always enabled where the platform supports them.
+// Mobile (iOS/Android) uses the AECM/fixed-AGC variants recommended for
+// speakerphone use.  Returns false on the first engine error.
+bool WebRtcVoiceEngine::SetOptions(int options) {
+  // NS and typing detection are always on, if supported.
+  bool aec = (options & MediaEngineInterface::ECHO_CANCELLATION) != 0;
+  bool agc = (options & MediaEngineInterface::AUTO_GAIN_CONTROL) != 0;
+#if !defined(IOS) && !defined(ANDROID)
+  if (voe_wrapper_->processing()->SetEcStatus(aec) == -1) {
+    LOG_RTCERR1(SetEcStatus, aec);
+    return false;
+  }
+
+  if (voe_wrapper_->processing()->SetAgcStatus(agc) == -1) {
+    LOG_RTCERR1(SetAgcStatus, agc);
+    return false;
+  }
+
+  if (voe_wrapper_->processing()->SetNsStatus(true) == -1) {
+    LOG_RTCERR1(SetNsStatus, true);
+    return false;
+  }
+
+  if (voe_wrapper_->processing()->SetTypingDetectionStatus(true) == -1) {
+    // In case of error, log the info and continue
+    LOG_RTCERR1(SetTypingDetectionStatus, true);
+  }
+#else
+  // Mobile: use the AECM (mobile echo control) variant.
+  if (voe_wrapper_->processing()->SetEcStatus(aec, kEcAecm) == -1) {
+    LOG_RTCERR2(SetEcStatus, aec, kEcAecm);
+    return false;
+  }
+
+  if (aec) {
+    // Use speakerphone mode with comfort noise generation for mobile.
+    if (voe_wrapper_->processing()->SetAecmMode(kAecmSpeakerphone, true) != 0) {
+      LOG_RTCERR2(SetAecmMode, kAecmSpeakerphone, true);
+    }
+  }
+
+  // On mobile, GIPS recommends fixed AGC (not adaptive)
+  if (voe_wrapper_->processing()->SetAgcStatus(agc, kAgcFixedDigital) == -1) {
+    LOG_RTCERR2(SetAgcStatus, agc, kAgcFixedDigital);
+    return false;
+  }
+
+  // On mobile, GIPS recommends moderate aggressiveness.
+  // (Fixed: previous code logged an undefined identifier |ns| here, which
+  // failed to compile on IOS/ANDROID builds; log the value actually passed.)
+  if (voe_wrapper_->processing()->SetNsStatus(true,
+      kNsModerateSuppression) == -1) {
+    LOG_RTCERR2(SetNsStatus, true, kNsModerateSuppression);
+    return false;
+  }
+
+  // No typing detection support on iOS or Android.
+#endif // !IOS && !ANDROID
+
+  return true;
+}
+
+// Captures a channel's playout/send state so it can be restored after a
+// device switch. NOTE(review): not referenced in the visible code; appears
+// to be kept for reinstating the soundclip pause/resume logic -- confirm.
+struct ResumeEntry {
+  ResumeEntry(WebRtcVoiceMediaChannel *c, bool p, SendFlags s)
+      : channel(c),
+        playout(p),
+        send(s) {
+  }
+
+  WebRtcVoiceMediaChannel *channel;
+  bool playout;
+  SendFlags send;
+};
+
+// TODO: Refactor this so that the core logic can be used to set the
+// soundclip device. At that time, reinstate the soundclip pause/resume code.
+// Switches the capture (in_device) and playout (out_device) devices. A NULL
+// device means "use the default device". All channels and the local monitor
+// are paused around the switch and resumed afterwards.
+bool WebRtcVoiceEngine::SetDevices(const Device* in_device,
+                                   const Device* out_device) {
+#if !defined(IOS) && !defined(ANDROID)
+  int in_id = in_device ? talk_base::FromString<int>(in_device->id) :
+      kDefaultAudioDeviceId;
+  int out_id = out_device ? talk_base::FromString<int>(out_device->id) :
+      kDefaultAudioDeviceId;
+  // The device manager uses -1 as the default device, which was the case for
+  // VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
+#ifndef WIN32
+  if (-1 == in_id) {
+    in_id = kDefaultAudioDeviceId;
+  }
+  if (-1 == out_id) {
+    out_id = kDefaultAudioDeviceId;
+  }
+#endif
+
+  std::string in_name = (in_id != kDefaultAudioDeviceId) ?
+      in_device->name : "Default device";
+  std::string out_name = (out_id != kDefaultAudioDeviceId) ?
+      out_device->name : "Default device";
+  LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
+            << ") and speaker to (id=" << out_id << ", name=" << out_name
+            << ")";
+
+  // If we're running the local monitor, we need to stop it first.
+  bool ret = true;
+  if (!PauseLocalMonitor()) {
+    LOG(LS_WARNING) << "Failed to pause local monitor";
+    ret = false;
+  }
+
+  // Must also pause all audio playback and capture.
+  for (ChannelList::const_iterator i = channels_.begin();
+       i != channels_.end(); ++i) {
+    WebRtcVoiceMediaChannel *channel = *i;
+    if (!channel->PausePlayout()) {
+      LOG(LS_WARNING) << "Failed to pause playout";
+      ret = false;
+    }
+    if (!channel->PauseSend()) {
+      LOG(LS_WARNING) << "Failed to pause send";
+      ret = false;
+    }
+  }
+
+  // Find the recording device id in VoiceEngine and set recording device.
+  if (!FindWebRtcAudioDeviceId(true, in_name, in_id, &in_id)) {
+    // Fixed: warn here too, matching the playout-device path below.
+    LOG(LS_WARNING) << "Failed to find VoiceEngine device id for " << in_name;
+    ret = false;
+  }
+  if (ret) {
+    if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
+      // Fixed: log in_name, not in_device->name; in_device may be NULL when
+      // the default device is requested.
+      LOG_RTCERR2(SetRecordingDevice, in_name, in_id);
+      ret = false;
+    }
+  }
+
+  // Find the playout device id in VoiceEngine and set playout device.
+  if (!FindWebRtcAudioDeviceId(false, out_name, out_id, &out_id)) {
+    LOG(LS_WARNING) << "Failed to find VoiceEngine device id for " << out_name;
+    ret = false;
+  }
+  if (ret) {
+    if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) {
+      // Fixed: log out_name, not out_device->name; out_device may be NULL.
+      LOG_RTCERR2(SetPlayoutDevice, out_name, out_id);
+      ret = false;
+    }
+  }
+
+  // Resume all audio playback and capture.
+  for (ChannelList::const_iterator i = channels_.begin();
+       i != channels_.end(); ++i) {
+    WebRtcVoiceMediaChannel *channel = *i;
+    if (!channel->ResumePlayout()) {
+      LOG(LS_WARNING) << "Failed to resume playout";
+      ret = false;
+    }
+    if (!channel->ResumeSend()) {
+      LOG(LS_WARNING) << "Failed to resume send";
+      ret = false;
+    }
+  }
+
+  // Resume local monitor.
+  if (!ResumeLocalMonitor()) {
+    LOG(LS_WARNING) << "Failed to resume local monitor";
+    ret = false;
+  }
+
+  if (ret) {
+    LOG(LS_INFO) << "Set microphone to (id=" << in_id <<" name=" << in_name
+                 << ") and speaker to (id="<< out_id << " name=" << out_name
+                 << ")";
+  }
+
+  return ret;
+#else
+  return true;
+#endif  // !IOS && !ANDROID
+}
+
+// Maps a device-manager device (name + id) onto VoiceEngine's own device
+// index for the given direction (is_input). On Linux the ids already agree;
+// on Windows/Mac we search VoiceEngine's device list by name prefix.
+bool WebRtcVoiceEngine::FindWebRtcAudioDeviceId(
+  bool is_input, const std::string& dev_name, int dev_id, int* rtc_id) {
+  // In Linux, VoiceEngine uses the same device dev_id as the device manager.
+#ifdef LINUX
+  *rtc_id = dev_id;
+  return true;
+#else
+  // In Windows and Mac, we need to find the VoiceEngine device id by name
+  // unless the input dev_id is the default device id.
+  if (kDefaultAudioDeviceId == dev_id) {
+    *rtc_id = dev_id;
+    return true;
+  }
+
+  // Get the number of VoiceEngine audio devices.
+  int count = 0;
+  if (is_input) {
+    if (-1 == voe_wrapper_->hw()->GetNumOfRecordingDevices(count)) {
+      LOG_RTCERR0(GetNumOfRecordingDevices);
+      return false;
+    }
+  } else {
+    if (-1 == voe_wrapper_->hw()->GetNumOfPlayoutDevices(count)) {
+      LOG_RTCERR0(GetNumOfPlayoutDevices);
+      return false;
+    }
+  }
+
+  for (int i = 0; i < count; ++i) {
+    // NOTE(review): 128 presumably matches VoiceEngine's maximum device name
+    // and GUID lengths -- confirm against the VoEHardware API.
+    char name[128];
+    char guid[128];
+    if (is_input) {
+      voe_wrapper_->hw()->GetRecordingDeviceName(i, name, guid);
+      LOG(LS_VERBOSE) << "VoiceEngine microphone " << i << ": " << name;
+    } else {
+      voe_wrapper_->hw()->GetPlayoutDeviceName(i, name, guid);
+      LOG(LS_VERBOSE) << "VoiceEngine speaker " << i << ": " << name;
+    }
+
+    // Prefix match: the device manager's name may carry a suffix beyond
+    // what VoiceEngine reports.
+    std::string webrtc_name(name);
+    if (dev_name.compare(0, webrtc_name.size(), webrtc_name) == 0) {
+      *rtc_id = i;
+      return true;
+    }
+  }
+  LOG(LS_WARNING) << "VoiceEngine cannot find device: " << dev_name;
+  return false;
+#endif
+}
+
+// Reads the current speaker volume from VoiceEngine into |level|.
+bool WebRtcVoiceEngine::GetOutputVolume(int* level) {
+  unsigned int volume;
+  if (voe_wrapper_->volume()->GetSpeakerVolume(volume) == -1) {
+    LOG_RTCERR1(GetSpeakerVolume, level);
+    return false;
+  }
+  *level = static_cast<int>(volume);
+  return true;
+}
+
+// Sets the speaker volume; |level| must be in the 0-255 range VoiceEngine
+// accepts.
+bool WebRtcVoiceEngine::SetOutputVolume(int level) {
+  ASSERT(level >= 0 && level <= 255);
+  bool success = (voe_wrapper_->volume()->SetSpeakerVolume(level) != -1);
+  if (!success) {
+    LOG_RTCERR1(SetSpeakerVolume, level);
+  }
+  return success;
+}
+
+// Returns the current microphone speech level, or -1 if the query fails.
+int WebRtcVoiceEngine::GetInputLevel() {
+  unsigned int level;
+  if (voe_wrapper_->volume()->GetSpeechInputLevel(level) == -1) {
+    return -1;
+  }
+  return static_cast<int>(level);
+}
+
+// Records the desired local-monitor state and applies it immediately.
+bool WebRtcVoiceEngine::SetLocalMonitor(bool enable) {
+  desired_local_monitor_enable_ = enable;
+  return ChangeLocalMonitor(desired_local_monitor_enable_);
+}
+
+// Starts or stops recording the microphone to the monitor stream. Idempotent:
+// does nothing if the monitor is already in the requested state.
+bool WebRtcVoiceEngine::ChangeLocalMonitor(bool enable) {
+  if (enable && !monitor_.get()) {
+    monitor_.reset(new WebRtcMonitorStream);
+    if (voe_wrapper_->file()->StartRecordingMicrophone(monitor_.get()) == -1) {
+      LOG_RTCERR1(StartRecordingMicrophone, monitor_.get());
+      // Must call Stop() because there are some cases where Start will report
+      // failure but still change the state, and if we leave VE in the on state
+      // then it could crash later when trying to invoke methods on our monitor.
+      voe_wrapper_->file()->StopRecordingMicrophone();
+      monitor_.reset();
+      return false;
+    }
+  } else if (!enable && monitor_.get()) {
+    voe_wrapper_->file()->StopRecordingMicrophone();
+    monitor_.reset();
+  }
+  return true;
+}
+
+// Temporarily stops the local monitor without changing the desired state.
+bool WebRtcVoiceEngine::PauseLocalMonitor() {
+  return ChangeLocalMonitor(false);
+}
+
+// Restores the monitor to the last state requested via SetLocalMonitor.
+bool WebRtcVoiceEngine::ResumeLocalMonitor() {
+  return ChangeLocalMonitor(desired_local_monitor_enable_);
+}
+
+// Returns the engine's list of supported audio codecs.
+const std::vector<AudioCodec>& WebRtcVoiceEngine::codecs() {
+  return codecs_;
+}
+
+// Returns true if |in| matches one of VoiceEngine's supported codecs.
+bool WebRtcVoiceEngine::FindCodec(const AudioCodec& in) {
+  return FindWebRtcCodec(in, NULL);
+}
+
+// Walks VoiceEngine's codec table looking for a match for |in|. On success,
+// optionally copies the matching webrtc codec description into |out|.
+bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
+                                        webrtc::CodecInst* out) {
+  const int num_codecs = voe_wrapper_->codec()->NumOfCodecs();
+  for (int index = 0; index < num_codecs; ++index) {
+    webrtc::CodecInst voe_codec;
+    if (voe_wrapper_->codec()->GetCodec(index, voe_codec) < 0) {
+      continue;
+    }
+    AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
+                     voe_codec.rate, voe_codec.channels, 0);
+    if (!codec.Matches(in)) {
+      continue;
+    }
+    if (out) {
+      // For VBR codecs (rate == -1), honor any explicit rate requested.
+      if (in.bitrate != 0 && voe_codec.rate == -1) {
+        voe_codec.rate = in.bitrate;
+      }
+      *out = voe_codec;
+    }
+    return true;
+  }
+  return false;
+}
+
+// Updates the diagnostic log level and filter. A min_sev of -1 keeps the
+// current level; the filter string is only applied once initialized.
+void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
+  // if min_sev == -1, we keep the current log level.
+  if (min_sev >= 0) {
+    log_level_ = min_sev;
+  }
+  log_filter_ = filter;
+  ApplyLogging(initialized_ ? log_filter_ : "");
+}
+
+// Returns the last error code reported by the main VoiceEngine wrapper.
+int WebRtcVoiceEngine::GetLastEngineError() {
+  return voe_wrapper_->error();
+}
+
+// We support three different logging settings for VoiceEngine:
+// 1. Observer callback that goes into talk diagnostic logfile.
+//    Use --logfile and --loglevel
+//
+// 2. Encrypted VoiceEngine log for debugging VoiceEngine.
+//    Use --voice_loglevel --voice_logfilter "tracefile file_name"
+//
+// 3. EC log and dump for debugging QualityEngine.
+//    Use --voice_loglevel --voice_logfilter "recordEC file_name"
+//
+// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
+//    Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
+// Translates our log level into webrtc trace categories and parses the
+// filter string for "tracefile <name>" and "recordEC <name>" directives.
+void WebRtcVoiceEngine::ApplyLogging(const std::string& log_filter) {
+  // Set log level.  Each case deliberately falls through, accumulating the
+  // trace categories for every severity at or above log_level_.
+  int filter = 0;
+  switch (log_level_) {
+    case talk_base::LS_VERBOSE:
+      filter |= webrtc::kTraceAll;      // fall through
+    case talk_base::LS_INFO:
+      filter |= webrtc::kTraceStateInfo;  // fall through
+    case talk_base::LS_WARNING:
+      filter |= (webrtc::kTraceInfo | webrtc::kTraceWarning);  // fall through
+    case talk_base::LS_ERROR:
+      filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
+  }
+  tracing_->SetTraceFilter(filter);
+
+  // Set encrypted trace file.
+  // Split the filter on spaces, honoring double-quoted filenames.
+  std::vector<std::string> opts;
+  talk_base::tokenize(log_filter, ' ', '"', '"', &opts);
+  std::vector<std::string>::iterator tracefile =
+      std::find(opts.begin(), opts.end(), "tracefile");
+  if (tracefile != opts.end() && ++tracefile != opts.end()) {
+    // Write encrypted debug output (at same loglevel) to file
+    // EncryptedTraceFile no longer supported.
+    if (tracing_->SetTraceFile(tracefile->c_str()) == -1) {
+      LOG_RTCERR1(SetTraceFile, *tracefile);
+    }
+  }
+
+  // Set AEC dump file
+  std::vector<std::string>::iterator recordEC =
+      std::find(opts.begin(), opts.end(), "recordEC");
+  if (recordEC != opts.end()) {
+    ++recordEC;
+    if (recordEC != opts.end() && !is_dumping_aec_) {
+      // Start dumping AEC when we are not dumping and recordEC has a filename.
+      if (voe_wrapper_->processing()->StartDebugRecording(
+          recordEC->c_str()) == -1) {
+        LOG_RTCERR0(StartDebugRecording);
+      } else {
+        is_dumping_aec_ = true;
+      }
+    } else if (recordEC == opts.end() && is_dumping_aec_) {
+      // Stop dumping EC when we are dumping and recordEC has no filename.
+      if (voe_wrapper_->processing()->StopDebugRecording() == -1) {
+        LOG_RTCERR0(StopDebugRecording);
+      }
+      is_dumping_aec_ = false;
+    }
+  }
+}
+
+// Ignore spammy trace messages, mostly from the stats API when we haven't
+// gotten RTCP info yet from the remote side. A trace is ignored when it
+// starts with one of the known-noisy prefixes below.
+static bool ShouldIgnoreTrace(const std::string& trace) {
+  static const char* kTracesToIgnore[] = {
+    "\tfailed to GetReportBlockInformation",
+    "GetRecCodec() failed to get received codec",
+    "GetRemoteRTCPData() failed to retrieve sender info for remote side",
+    "GetRTPStatistics() failed to measure RTT since no RTP packets have been received yet",  // NOLINT
+    "GetRTPStatistics() failed to read RTP statistics from the RTP/RTCP module",
+    "GetRTPStatistics() failed to retrieve RTT from the RTP/RTCP module",
+    "RTCPReceiver::SenderInfoReceived No received SR",
+    "StatisticsRTP() no statisitics availble",
+    NULL
+  };
+  size_t index = 0;
+  while (kTracesToIgnore[index] != NULL) {
+    // find() == 0 means the prefix matches at the start of the trace.
+    if (trace.find(kTracesToIgnore[index]) == 0) {
+      return true;
+    }
+    ++index;
+  }
+  return false;
+}
+
+// Trace callback from VoiceEngine. Maps the webrtc trace level to our log
+// severity, strips the fixed-width boilerplate prefix, and forwards the
+// message to the diagnostic log (unless it's known noise).
+void WebRtcVoiceEngine::Print(const webrtc::TraceLevel level,
+                              const char* trace, const int length) {
+  talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
+  if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
+    sev = talk_base::LS_ERROR;
+  else if (level == webrtc::kTraceWarning)
+    sev = talk_base::LS_WARNING;
+  else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
+    sev = talk_base::LS_INFO;
+
+  if (sev >= log_level_) {
+    // Skip past boilerplate prefix text
+    // NOTE(review): assumes webrtc traces carry a 71-character header plus a
+    // trailing terminator (hence 72) -- confirm against the VoE version.
+    if (length < 72) {
+      std::string msg(trace, length);
+      LOG(LS_ERROR) << "Malformed webrtc log message: ";
+      LOG_V(sev) << msg;
+    } else {
+      std::string msg(trace + 71, length - 72);
+      if (!ShouldIgnoreTrace(msg)) {
+        LOG_V(sev) << "WebRtc VoE:" << msg;
+      }
+    }
+  }
+}
+
+// Error callback from VoiceEngine. Looks up the media channel that owns
+// |channel_num| (under the channel-list lock) and forwards the error to it.
+void WebRtcVoiceEngine::CallbackOnError(const int channel_num,
+                                        const int err_code) {
+  talk_base::CritScope lock(&channels_cs_);
+  WebRtcVoiceMediaChannel* channel = NULL;
+  uint32 ssrc = 0;
+  LOG(LS_WARNING) << "VoiceEngine error " << err_code << " reported on channel "
+               << channel_num << ".";
+  if (FindChannelAndSsrc(channel_num, &channel, &ssrc)) {
+    ASSERT(channel != NULL);
+    channel->OnError(ssrc, err_code);
+  } else {
+    LOG(LS_ERROR) << "VoiceEngine channel " << channel_num
+        << " could not be found in the channel list when error reported.";
+  }
+}
+
+// Returns the preference weight for a codec (higher means more preferred),
+// or -1 with a warning if the codec is not in our preference table.
+int WebRtcVoiceEngine::GetCodecPreference(const char *name, int clockrate) {
+  const size_t num_prefs = ARRAY_SIZE(kCodecPrefs);
+  for (size_t index = 0; index < num_prefs; ++index) {
+    bool name_matches = (strcmp(kCodecPrefs[index].name, name) == 0);
+    if (name_matches && kCodecPrefs[index].clockrate == clockrate) {
+      return num_prefs - index;
+    }
+  }
+  LOG(LS_WARNING) << "Unexpected codec \"" << name << "/" << clockrate << "\"";
+  return -1;
+}
+
+// Locates the media channel (and its ssrc) that owns the given VoiceEngine
+// channel number. Outputs are cleared first; returns false if none matches.
+bool WebRtcVoiceEngine::FindChannelAndSsrc(
+    int channel_num, WebRtcVoiceMediaChannel** channel, uint32* ssrc) const {
+  ASSERT(channel != NULL && ssrc != NULL);
+  *channel = NULL;
+  *ssrc = 0;
+
+  ChannelList::const_iterator iter = channels_.begin();
+  while (iter != channels_.end()) {
+    ASSERT(*iter != NULL);
+    if ((*iter)->FindSsrc(channel_num, ssrc)) {
+      *channel = *iter;
+      return true;
+    }
+    ++iter;
+  }
+  return false;
+}
+
+// Adds |channel| to the engine's channel list (guarded by channels_cs_).
+void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel *channel) {
+  talk_base::CritScope lock(&channels_cs_);
+  channels_.push_back(channel);
+}
+
+// Removes |channel| from the engine's channel list (guarded by channels_cs_).
+// Safe to call even if the channel was never registered.
+void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel *channel) {
+  talk_base::CritScope lock(&channels_cs_);
+  ChannelList::iterator i = std::find(channels_.begin(),
+                                      channels_.end(),
+                                      channel);
+  if (i != channels_.end()) {
+    channels_.erase(i);
+  }
+}
+
+// Adds |soundclip| to the engine's soundclip list.
+// NOTE(review): unlike RegisterChannel, no lock is taken here -- presumably
+// soundclips are only touched from one thread; confirm.
+void WebRtcVoiceEngine::RegisterSoundclip(WebRtcSoundclipMedia *soundclip) {
+  soundclips_.push_back(soundclip);
+}
+
+// Removes |soundclip| from the soundclip list, if present. Like
+// RegisterSoundclip, this runs without a lock.
+void WebRtcVoiceEngine::UnregisterSoundclip(WebRtcSoundclipMedia *soundclip) {
+  SoundclipList::iterator i = std::find(soundclips_.begin(),
+                                        soundclips_.end(),
+                                        soundclip);
+  if (i != soundclips_.end()) {
+    soundclips_.erase(i);
+  }
+}
+
+// Adjusts the default AGC target level by the specified delta.
+// NB: If we start messing with other config fields, we'll want
+// to save the current webrtc::AgcConfig as well.
+// targetLeveldBOv is in dB below full scale (note the minus signs in the log
+// below), so a positive delta lowers the effective target level.
+bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) {
+  webrtc::AgcConfig config = default_agc_config_;
+  config.targetLeveldBOv += delta;
+
+  LOG(LS_INFO) << "Adjusting AGC level from default -"
+               << default_agc_config_.targetLeveldBOv << "dB to -"
+               << config.targetLeveldBOv << "dB";
+
+  if (voe_wrapper_->processing()->SetAgcConfig(config) == -1) {
+    LOG_RTCERR1(SetAgcConfig, config.targetLeveldBOv);
+    return false;
+  }
+  return true;
+}
+
+// Configures echo cancellation and noise suppression modes according to
+// whether or not we are in a multi-point conference.
+// No-op (returns true) on mobile, where only the AECM canceller is used.
+bool WebRtcVoiceEngine::SetConferenceMode(bool enable) {
+// Only use EC_AECM for mobile.
+#if defined(IOS) || defined(ANDROID)
+  return true;
+#endif
+
+  LOG(LS_INFO) << (enable ? "Enabling" : "Disabling")
+               << " Conference Mode noise reduction";
+
+  // We always configure noise suppression on, so just toggle the mode.
+  const webrtc::NsModes ns_mode = enable ? webrtc::kNsConference
+                                         : webrtc::kNsDefault;
+  if (voe_wrapper_->processing()->SetNsStatus(true, ns_mode) == -1) {
+    LOG_RTCERR2(SetNsStatus, true, ns_mode);
+    return false;
+  }
+
+  // Echo-cancellation is a user-option, so preserve the enable state and
+  // just toggle the mode.
+  bool aec;
+  webrtc::EcModes ec_mode;
+  if (voe_wrapper_->processing()->GetEcStatus(aec, ec_mode) == -1) {
+    LOG_RTCERR0(GetEcStatus);
+    return false;
+  }
+  ec_mode = enable ? webrtc::kEcConference : webrtc::kEcDefault;
+  if (voe_wrapper_->processing()->SetEcStatus(aec, ec_mode) == -1) {
+    LOG_RTCERR2(SetEcStatus, aec, ec_mode);
+    return false;
+  }
+  return true;
+}
+
+// WebRtcVoiceMediaChannel
+// Creates the underlying VoiceEngine channel, registers with the engine,
+// hooks up external transport and RTCP, and picks a random send SSRC.
+WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
+    : WebRtcMediaChannel<VoiceMediaChannel, WebRtcVoiceEngine>(
+          engine,
+          engine->voe()->base()->CreateChannel()),
+      channel_options_(0),
+      agc_adjusted_(false),
+      dtmf_allowed_(false),
+      desired_playout_(false),
+      playout_(false),
+      desired_send_(SEND_NOTHING),
+      send_(SEND_NOTHING) {
+  engine->RegisterChannel(this);
+  LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel "
+                  << voe_channel();
+
+  // Register external transport
+  if (engine->voe()->network()->RegisterExternalTransport(
+      voe_channel(), *static_cast<Transport*>(this)) == -1) {
+    LOG_RTCERR2(RegisterExternalTransport, voe_channel(), this);
+  }
+
+  // Enable RTCP (for quality stats and feedback messages)
+  EnableRtcp(voe_channel());
+
+  // Create a random but nonzero send SSRC
+  SetSendSsrc(talk_base::CreateRandomNonZeroId());
+}
+
+// Tears down in reverse order of construction: unhook the transport,
+// unregister from the engine, remove muxed receive streams, then delete the
+// primary VoiceEngine channel.
+WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
+  LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel "
+                  << voe_channel();
+
+  // DeRegister external transport
+  if (engine()->voe()->network()->DeRegisterExternalTransport(
+      voe_channel()) == -1) {
+    LOG_RTCERR1(DeRegisterExternalTransport, voe_channel());
+  }
+
+  // Unregister ourselves from the engine.
+  engine()->UnregisterChannel(this);
+  // Remove any remaining streams.
+  while (!mux_channels_.empty()) {
+    RemoveStream(mux_channels_.begin()->first);
+  }
+  // Delete the primary channel.
+  if (engine()->voe()->base()->DeleteChannel(voe_channel()) == -1) {
+    LOG_RTCERR1(DeleteChannel, voe_channel());
+  }
+}
+
+// Stores new channel option flags. A no-op when nothing changes; refuses
+// changes while the channel is sending.
+bool WebRtcVoiceMediaChannel::SetOptions(int flags) {
+  if (flags == channel_options_) {
+    return true;
+  }
+  if (send_ != SEND_NOTHING) {
+    // Options may only change while we're not sending.
+    return false;
+  }
+  // Save the options, to be interpreted where appropriate.
+  channel_options_ = flags;
+  return true;
+}
+
+// Re-maps receive payload types so they match what the remote side offered.
+// Returns false on the first unknown codec or VoiceEngine failure.
+bool WebRtcVoiceMediaChannel::SetRecvCodecs(
+    const std::vector<AudioCodec>& codecs) {
+  // Update our receive payload types to match what we offered. This only is
+  // an issue when a different entity (i.e. a server) is generating the offer
+  // for us.
+  bool ret = true;
+  for (std::vector<AudioCodec>::const_iterator i = codecs.begin();
+       i != codecs.end() && ret; ++i) {
+    webrtc::CodecInst gcodec;
+    if (engine()->FindWebRtcCodec(*i, &gcodec)) {
+      if (gcodec.pltype != i->id) {
+        LOG(LS_INFO) << "Updating payload type for " << gcodec.plname
+                  << " from " << gcodec.pltype << " to " << i->id;
+        gcodec.pltype = i->id;
+        if (engine()->voe()->codec()->SetRecPayloadType(
+            voe_channel(), gcodec) == -1) {
+          LOG_RTCERR1(SetRecPayloadType, voe_channel());
+          ret = false;
+        }
+      }
+    } else {
+      LOG(LS_WARNING) << "Unknown codec " << i->name;
+      ret = false;
+    }
+  }
+
+  return ret;
+}
+
+// Applies the negotiated send codec list: the first usable codec becomes the
+// send codec; "telephone-event" enables DTMF, "CN" enables VAD/comfort
+// noise, and "red" enables FEC. Falls back to PCMU/8000 if the list is
+// empty. Returns false on fatal VoiceEngine errors.
+bool WebRtcVoiceMediaChannel::SetSendCodecs(
+    const std::vector<AudioCodec>& codecs) {
+  // Disable DTMF, VAD, and FEC unless we know the other side wants them.
+  dtmf_allowed_ = false;
+  engine()->voe()->codec()->SetVADStatus(voe_channel(), false);
+  engine()->voe()->rtp()->SetFECStatus(voe_channel(), false);
+
+  // Scan through the list to figure out the codec to use for sending, along
+  // with the proper configuration for VAD and DTMF.
+  bool first = true;
+  webrtc::CodecInst send_codec;
+  memset(&send_codec, 0, sizeof(send_codec));
+
+  for (std::vector<AudioCodec>::const_iterator i = codecs.begin();
+       i != codecs.end(); ++i) {
+    // Ignore codecs we don't know about. The negotiation step should prevent
+    // this, but double-check to be sure.
+    webrtc::CodecInst gcodec;
+    if (!engine()->FindWebRtcCodec(*i, &gcodec)) {
+      LOG(LS_WARNING) << "Unknown codec " << i->name;
+      continue;
+    }
+
+    // Find the DTMF telephone event "codec" and tell VoiceEngine about it.
+    if (i->name == "telephone-event" || i->name == "audio/telephone-event") {
+      engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
+          voe_channel(), i->id);
+      dtmf_allowed_ = true;
+    }
+
+    // Turn voice activity detection/comfort noise on if supported.
+    // Set the wideband CN payload type appropriately.
+    // (narrowband always uses the static payload type 13).
+    if (i->name == "CN") {
+      webrtc::PayloadFrequencies cn_freq;
+      switch (i->clockrate) {
+        case 8000:
+          cn_freq = webrtc::kFreq8000Hz;
+          break;
+        case 16000:
+          cn_freq = webrtc::kFreq16000Hz;
+          break;
+        case 32000:
+          cn_freq = webrtc::kFreq32000Hz;
+          break;
+        default:
+          LOG(LS_WARNING) << "CN frequency " << i->clockrate
+                          << " not supported.";
+          continue;
+      }
+      engine()->voe()->codec()->SetVADStatus(voe_channel(), true);
+      if (cn_freq != webrtc::kFreq8000Hz) {
+        engine()->voe()->codec()->SetSendCNPayloadType(voe_channel(),
+                                                       i->id, cn_freq);
+      }
+    }
+
+    // We'll use the first codec in the list to actually send audio data.
+    // Be sure to use the payload type requested by the remote side.
+    // "red", for FEC audio, is a special case where the actual codec to be
+    // used is specified in params.
+    if (first) {
+      if (i->name == "red") {
+        // Parse out the RED parameters. If we fail, just ignore RED;
+        // we don't support all possible params/usage scenarios.
+        if (!GetRedSendCodec(*i, codecs, &send_codec)) {
+          continue;
+        }
+
+        // Enable redundant encoding of the specified codec. Treat any
+        // failure as a fatal internal error.
+        LOG(LS_INFO) << "Enabling RED";
+        if (engine()->voe()->rtp()->SetFECStatus(voe_channel(),
+                                                    true, i->id) == -1) {
+          LOG_RTCERR3(SetFECStatus, voe_channel(), true, i->id);
+          return false;
+        }
+      } else {
+        send_codec = gcodec;
+        send_codec.pltype = i->id;
+      }
+      first = false;
+    }
+  }
+
+  // If we're being asked to set an empty list of codecs, due to a buggy client,
+  // choose the most common format: PCMU
+  if (first) {
+    LOG(LS_WARNING) << "Received empty list of codecs; using PCMU/8000";
+    AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
+    engine()->FindWebRtcCodec(codec, &send_codec);
+  }
+
+  // Set the codec.
+  LOG(LS_INFO) << "Selected voice codec " << send_codec.plname
+            << "/" << send_codec.plfreq;
+  if (engine()->voe()->codec()->SetSendCodec(voe_channel(),
+                                                send_codec) == -1) {
+    LOG_RTCERR1(SetSendCodec, voe_channel());
+    return false;
+  }
+
+  return true;
+}
+
+// Accepts (and ignores) receive-side RTP header extensions.
+bool WebRtcVoiceMediaChannel::SetRecvRtpHeaderExtensions(
+    const std::vector<RtpHeaderExtension>& extensions) {
+  // We don't support any incoming extensions headers right now.
+  return true;
+}
+
+// Enables the RTP audio-level header extension when present in |extensions|
+// (and its id is in the valid range); disables it otherwise. The underlying
+// VoiceEngine API is unavailable on mobile, where this is a no-op.
+bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
+    const std::vector<RtpHeaderExtension>& extensions) {
+  // Enable the audio level extension header if requested.
+  std::vector<RtpHeaderExtension>::const_iterator it;
+  for (it = extensions.begin(); it != extensions.end(); ++it) {
+    if (it->uri == kRtpAudioLevelHeaderExtension) {
+      break;
+    }
+  }
+
+  bool enable = (it != extensions.end());
+  int id = 0;
+
+  if (enable) {
+    id = it->id;
+    if (id < kMinRtpHeaderExtensionId ||
+        id > kMaxRtpHeaderExtensionId) {
+      LOG(LS_WARNING) << "Invalid RTP header extension id " << id;
+      return false;
+    }
+  }
+
+// This api call is not available in iOS version of VoiceEngine currently.
+#if !defined(IOS) && !defined(ANDROID)
+  if (engine()->voe()->rtp()->SetRTPAudioLevelIndicationStatus(
+      voe_channel(), enable, id) == -1) {
+    LOG_RTCERR3(SetRTPAudioLevelIndicationStatus, voe_channel(), enable, id);
+    return false;
+  }
+#endif
+
+  return true;
+}
+
+// Records the desired playout state and applies it immediately.
+bool WebRtcVoiceMediaChannel::SetPlayout(bool playout) {
+  desired_playout_ = playout;
+  return ChangePlayout(desired_playout_);
+}
+
+// Temporarily stops playout without changing the desired state.
+bool WebRtcVoiceMediaChannel::PausePlayout() {
+  return ChangePlayout(false);
+}
+
+// Restores playout to the state last requested via SetPlayout.
+bool WebRtcVoiceMediaChannel::ResumePlayout() {
+  return ChangePlayout(desired_playout_);
+}
+
+// Applies |playout| to the default channel (only when no streams are muxed)
+// and to every muxed receive stream; playout_ is updated only on success.
+bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
+  if (playout_ == playout) {
+    return true;
+  }
+
+  bool result = true;
+  if (mux_channels_.empty()) {
+    // Only toggle the default channel if we don't have any other channels.
+    result = SetPlayout(voe_channel(), playout);
+  }
+  for (ChannelMap::iterator it = mux_channels_.begin();
+       it != mux_channels_.end() && result; ++it) {
+    if (!SetPlayout(it->second, playout)) {
+      LOG(LS_ERROR) << "SetPlayout " << playout << " on channel " << it->second
+                    << " failed";
+      result = false;
+    }
+  }
+
+  if (result) {
+    playout_ = playout;
+  }
+  return result;
+}
+
+// Records the desired send state and applies it immediately.
+bool WebRtcVoiceMediaChannel::SetSend(SendFlags send) {
+  desired_send_ = send;
+  return ChangeSend(desired_send_);
+}
+
+// Temporarily stops sending without changing the desired state.
+bool WebRtcVoiceMediaChannel::PauseSend() {
+  return ChangeSend(SEND_NOTHING);
+}
+
+// Restores sending to the state last requested via SetSend.
+bool WebRtcVoiceMediaChannel::ResumeSend() {
+  return ChangeSend(desired_send_);
+}
+
+// Transitions the channel's send state. SEND_MICROPHONE sends captured
+// audio, SEND_RINGBACKTONE plays ringback_tone_ as the mic signal, and
+// SEND_NOTHING stops sending and reverts conference-mode/AGC adjustments.
+bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
+  if (send_ == send) {
+    return true;
+  }
+
+  if (send == SEND_MICROPHONE) {
+#ifdef CHROMEOS
+    // Conference mode doesn't work well on ChromeOS.
+    if (!engine()->SetConferenceMode(false)) {
+      LOG_RTCERR1(SetConferenceMode, voe_channel());
+      return false;
+    }
+#else
+    // Multi-point conferences use conference-mode noise filtering.
+    if (!engine()->SetConferenceMode(
+        0 != (channel_options_ & OPT_CONFERENCE))) {
+      LOG_RTCERR1(SetConferenceMode, voe_channel());
+      return false;
+    }
+#endif  // CHROMEOS
+
+    // Tandberg-bridged conferences have an AGC target that is lower than
+    // GTV-only levels.
+    if ((channel_options_ & OPT_AGC_TANDBERG_LEVELS) && !agc_adjusted_) {
+      if (engine()->AdjustAgcLevel(kTandbergDbAdjustment)) {
+        agc_adjusted_ = true;
+      }
+    }
+
+    // VoiceEngine resets sequence number when StopSend is called. This
+    // sometimes causes libSRTP to complain about packets being
+    // replayed. To get around this we store the last sent sequence
+    // number and initializes the channel with the next to continue on
+    // the same sequence.
+    if (sequence_number() != -1) {
+      LOG(LS_INFO) << "WebRtcVoiceMediaChannel restores seqnum="
+                   << sequence_number() + 1;
+      if (engine()->voe()->sync()->SetInitSequenceNumber(
+              voe_channel(), sequence_number() + 1) == -1) {
+        LOG_RTCERR2(SetInitSequenceNumber, voe_channel(),
+                    sequence_number() + 1);
+      }
+    }
+    if (engine()->voe()->base()->StartSend(voe_channel()) == -1) {
+      LOG_RTCERR1(StartSend, voe_channel());
+      return false;
+    }
+    // Make sure any leftover ringback tone stops feeding the mic path.
+    if (engine()->voe()->file()->StopPlayingFileAsMicrophone(
+        voe_channel()) == -1) {
+      LOG_RTCERR1(StopPlayingFileAsMicrophone, voe_channel());
+      return false;
+    }
+  } else if (send == SEND_RINGBACKTONE) {
+    ASSERT(ringback_tone_.get() != NULL);
+    if (!ringback_tone_.get()) {
+      return false;
+    }
+    if (engine()->voe()->file()->StartPlayingFileAsMicrophone(
+        voe_channel(), ringback_tone_.get(), false) == -1) {
+      LOG_RTCERR3(StartPlayingFileAsMicrophone, voe_channel(),
+                  ringback_tone_.get(), false);
+      return false;
+    }
+    // VoiceEngine resets sequence number when StopSend is called. This
+    // sometimes causes libSRTP to complain about packets being
+    // replayed. To get around this we store the last sent sequence
+    // number and initializes the channel with the next to continue on
+    // the same sequence.
+    if (sequence_number() != -1) {
+      LOG(LS_INFO) << "WebRtcVoiceMediaChannel restores seqnum="
+                   << sequence_number() + 1;
+      if (engine()->voe()->sync()->SetInitSequenceNumber(
+              voe_channel(), sequence_number() + 1) == -1) {
+        LOG_RTCERR2(SetInitSequenceNumber, voe_channel(),
+                    sequence_number() + 1);
+      }
+    }
+    if (engine()->voe()->base()->StartSend(voe_channel()) == -1) {
+      LOG_RTCERR1(StartSend, voe_channel());
+      return false;
+    }
+  } else {  // SEND_NOTHING
+    if (engine()->voe()->base()->StopSend(voe_channel()) == -1) {
+      LOG_RTCERR1(StopSend, voe_channel());
+    }
+
+    // Reset the AGC level, if it was set.
+    if (agc_adjusted_) {
+      if (engine()->AdjustAgcLevel(0)) {
+        agc_adjusted_ = false;
+      }
+    }
+
+    // Disable conference-mode noise filtering.
+    if (!engine()->SetConferenceMode(false)) {
+      LOG_RTCERR1(SetConferenceMode, voe_channel());
+    }
+  }
+  send_ = send;
+  return true;
+}
+
+// Creates a VoiceEngine channel for receiving |ssrc|'s audio, sharing our
+// local SSRC so RTCP reports stay correct. Returns false if the ssrc is
+// already present or any VoiceEngine call fails.
+bool WebRtcVoiceMediaChannel::AddStream(uint32 ssrc) {
+  talk_base::CritScope lock(&mux_channels_cs_);
+
+  if (mux_channels_.find(ssrc) != mux_channels_.end()) {
+    return false;
+  }
+
+  // Create a new channel for receiving audio data.
+  int channel = engine()->voe()->base()->CreateChannel();
+  if (channel == -1) {
+    LOG_RTCERR0(CreateChannel);
+    return false;
+  }
+
+  // Configure to use external transport, like our default channel.
+  if (engine()->voe()->network()->RegisterExternalTransport(
+          channel, *this) == -1) {
+    LOG_RTCERR2(SetExternalTransport, channel, this);
+    // Fixed: delete the just-created channel so it doesn't leak.
+    engine()->voe()->base()->DeleteChannel(channel);
+    return false;
+  }
+
+  // Use the same SSRC as our default channel (so the RTCP reports are correct).
+  unsigned int send_ssrc;
+  webrtc::VoERTP_RTCP* rtp = engine()->voe()->rtp();
+  if (rtp->GetLocalSSRC(voe_channel(), send_ssrc) == -1) {
+    LOG_RTCERR2(GetSendSSRC, channel, send_ssrc);
+    // Fixed: delete the just-created channel so it doesn't leak.
+    engine()->voe()->base()->DeleteChannel(channel);
+    return false;
+  }
+  if (rtp->SetLocalSSRC(channel, send_ssrc) == -1) {
+    LOG_RTCERR2(SetSendSSRC, channel, send_ssrc);
+    // Fixed: delete the just-created channel so it doesn't leak.
+    engine()->voe()->base()->DeleteChannel(channel);
+    return false;
+  }
+
+  if (mux_channels_.empty() && playout_) {
+    // This is the first stream in a multi user meeting. We can now
+    // disable playback of the default stream. This since the default
+    // stream will probably have received some initial packets before
+    // the new stream was added. This will mean that the CN state from
+    // the default channel will be mixed in with the other streams
+    // throughout the whole meeting, which might be disturbing.
+    LOG(LS_INFO) << "Disabling playback on the default voice channel";
+    SetPlayout(voe_channel(), false);
+  }
+
+  mux_channels_[ssrc] = channel;
+
+  // TODO: We should rollback the add if SetPlayout fails.
+  LOG(LS_INFO) << "New audio stream " << ssrc
+            << " registered to VoiceEngine channel #"
+            << channel << ".";
+  return SetPlayout(channel, playout_);
+}
+
+bool WebRtcVoiceMediaChannel::RemoveStream(uint32 ssrc) {
+  talk_base::CritScope lock(&mux_channels_cs_);
+  ChannelMap::iterator it = mux_channels_.find(ssrc);
+
+  if (it != mux_channels_.end()) {
+    if (engine()->voe()->network()->DeRegisterExternalTransport(
+        it->second) == -1) {
+      LOG_RTCERR1(DeRegisterExternalTransport, it->second);
+    }
+
+    LOG(LS_INFO) << "Removing audio stream " << ssrc
+              << " with VoiceEngine channel #"
+              << it->second << ".";
+    if (engine()->voe()->base()->DeleteChannel(it->second) == -1) {
+      LOG_RTCERR1(DeleteChannel, voe_channel());
+      return false;
+    }
+
+    mux_channels_.erase(it);
+    if (mux_channels_.empty() && playout_) {
+      // The last stream was removed. We can now enable the default
+      // channel for new channels to be played out immediately without
+      // waiting for AddStream messages.
+      // TODO: Does the default channel still have its CN state?
+      LOG(LS_INFO) << "Enabling playback on the default voice channel";
+      SetPlayout(voe_channel(), true);
+    }
+  }
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::GetActiveStreams(
+    AudioInfo::StreamList* actives) {
+  actives->clear();
+  for (ChannelMap::iterator it = mux_channels_.begin();
+       it != mux_channels_.end(); ++it) {
+    int level = GetOutputLevel(it->second);
+    if (level > 0) {
+      actives->push_back(std::make_pair(it->first, level));
+    }
+  }
+  return true;
+}
+
+int WebRtcVoiceMediaChannel::GetOutputLevel() {
+  // return the highest output level of all streams
+  int highest = GetOutputLevel(voe_channel());
+  for (ChannelMap::iterator it = mux_channels_.begin();
+       it != mux_channels_.end(); ++it) {
+    int level = GetOutputLevel(it->second);
+    highest = talk_base::_max(level, highest);
+  }
+  return highest;
+}
+
+
+bool WebRtcVoiceMediaChannel::SetOutputScaling(
+    uint32 ssrc, double left, double right) {
+  talk_base::CritScope lock(&mux_channels_cs_);
+  // Collect the channels to scale the output volume.
+  std::vector<int> channels;
+  if (0 == ssrc) {  // Collect all channels, including the default one.
+    channels.push_back(voe_channel());
+    for (ChannelMap::const_iterator it = mux_channels_.begin();
+        it != mux_channels_.end(); ++it) {
+      channels.push_back(it->second);
+    }
+  } else {  // Collect only the channel of the specified ssrc.
+    int channel = GetChannel(ssrc);
+    if (-1 == channel) {
+      LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
+      return false;
+    }
+    channels.push_back(channel);
+  }
+
+  // Scale the output volume for the collected channels. We first normalize to
+  // scale the volume and then set the left and right pan.
+  float scale = static_cast<float>(talk_base::_max(left, right));
+  if (scale > 0.0001f) {
+    left /= scale;
+    right /= scale;
+  }
+  for (std::vector<int>::const_iterator it = channels.begin();
+      it != channels.end(); ++it) {
+    if (-1 == engine()->voe()->volume()->SetChannelOutputVolumeScaling(
+        *it, scale)) {
+      LOG_RTCERR2(SetChannelOutputVolumeScaling, *it, scale);
+      return false;
+    }
+    if (-1 == engine()->voe()->volume()->SetOutputVolumePan(
+        *it, static_cast<float>(left), static_cast<float>(right))) {
+      LOG_RTCERR3(SetOutputVolumePan, *it, left, right);
+      // Do not return if fails. SetOutputVolumePan is not available for all
+      // platforms.
+    }
+    LOG(LS_INFO) << "SetOutputScaling to left=" << left * scale
+                 << " right=" << right * scale
+                 << " for channel " << *it << " and ssrc " << ssrc;
+  }
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::GetOutputScaling(
+    uint32 ssrc, double* left, double* right) {
+  if (!left || !right) return false;
+
+  talk_base::CritScope lock(&mux_channels_cs_);
+  // Determine which channel based on ssrc.
+  int channel = (0 == ssrc) ? voe_channel() : GetChannel(ssrc);
+  if (channel == -1) {
+    LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
+    return false;
+  }
+
+  float scaling;
+  if (-1 == engine()->voe()->volume()->GetChannelOutputVolumeScaling(
+      channel, scaling)) {
+    LOG_RTCERR2(GetChannelOutputVolumeScaling, channel, scaling);
+    return false;
+  }
+
+  float left_pan;
+  float right_pan;
+  if (-1 == engine()->voe()->volume()->GetOutputVolumePan(
+      channel, left_pan, right_pan)) {
+    LOG_RTCERR3(GetOutputVolumePan, channel, left_pan, right_pan);
+    // If GetOutputVolumePan fails, we use the default left and right pan.
+    left_pan = 1.0f;
+    right_pan = 1.0f;
+  }
+
+  *left = scaling * left_pan;
+  *right = scaling * right_pan;
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::SetRingbackTone(const char *buf, int len) {
+  ringback_tone_.reset(new WebRtcSoundclipStream(buf, len));
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::PlayRingbackTone(uint32 ssrc,
+                                             bool play, bool loop) {
+  if (!ringback_tone_.get()) {
+    return false;
+  }
+
+  // Determine which VoiceEngine channel to play on.
+  int channel = (ssrc == 0) ? voe_channel() : GetChannel(ssrc);
+  if (channel == -1) {
+    return false;
+  }
+
+  // Make sure the ringtone is cued properly, and play it out.
+  if (play) {
+    ringback_tone_->set_loop(loop);
+    ringback_tone_->Rewind();
+    if (engine()->voe()->file()->StartPlayingFileLocally(channel,
+        ringback_tone_.get()) == -1) {
+      LOG_RTCERR2(StartPlayingFileLocally, channel, ringback_tone_.get());
+      LOG(LS_ERROR) << "Unable to start ringback tone";
+      return false;
+    }
+    ringback_channels_.insert(channel);
+    LOG(LS_INFO) << "Started ringback on channel " << channel;
+  } else {
+    if (engine()->voe()->file()->StopPlayingFileLocally(channel)
+        == -1) {
+      LOG_RTCERR1(StopPlayingFileLocally, channel);
+      return false;
+    }
+    LOG(LS_INFO) << "Stopped ringback on channel " << channel;
+    ringback_channels_.erase(channel);
+  }
+
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::PressDTMF(int event, bool playout) {
+  if (!dtmf_allowed_) {
+    return false;
+  }
+
+  // Enable or disable DTMF playout of this tone as requested. This will linger
+  // until the next call to this method, but that's OK.
+  if (engine()->voe()->dtmf()->SetDtmfFeedbackStatus(playout) == -1) {
+    LOG_RTCERR2(SendDTMF, voe_channel(), playout);
+    return false;
+  }
+
+  // Send DTMF using out-of-band DTMF. ("true", as 3rd arg)
+  if (engine()->voe()->dtmf()->SendTelephoneEvent(voe_channel(), event,
+      true) == -1) {
+    LOG_RTCERR3(SendDTMF, voe_channel(), event, true);
+    return false;
+  }
+
+  return true;
+}
+
+void WebRtcVoiceMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
+  // Pick which channel to send this packet to. If this packet doesn't match
+  // any multiplexed streams, just send it to the default channel. Otherwise,
+  // send it to the specific decoder instance for that stream.
+  int which_channel = GetChannel(
+      ParseSsrc(packet->data(), packet->length(), false));
+  if (which_channel == -1) {
+    which_channel = voe_channel();
+  }
+
+  // Stop any ringback that might be playing on the channel.
+  // It's possible the ringback has already stopped, in which case we'll just
+  // use the opportunity to remove the channel from ringback_channels_.
+  const std::set<int>::iterator it = ringback_channels_.find(which_channel);
+  if (it != ringback_channels_.end()) {
+    if (engine()->voe()->file()->IsPlayingFileLocally(
+        which_channel) == 1) {
+      engine()->voe()->file()->StopPlayingFileLocally(which_channel);
+      LOG(LS_INFO) << "Stopped ringback on channel " << which_channel
+                   << " due to incoming media";
+    }
+    ringback_channels_.erase(which_channel);
+  }
+
+  // Pass it off to the decoder.
+  engine()->voe()->network()->ReceivedRTPPacket(which_channel,
+                                                   packet->data(),
+                                                   packet->length());
+}
+
+void WebRtcVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
+  // See above.
+  int which_channel = GetChannel(
+      ParseSsrc(packet->data(), packet->length(), true));
+  if (which_channel == -1) {
+    which_channel = voe_channel();
+  }
+
+  engine()->voe()->network()->ReceivedRTCPPacket(which_channel,
+                                                    packet->data(),
+                                                    packet->length());
+}
+
+void WebRtcVoiceMediaChannel::SetSendSsrc(uint32 ssrc) {
+  if (engine()->voe()->rtp()->SetLocalSSRC(voe_channel(), ssrc)
+      == -1) {
+     LOG_RTCERR2(SetSendSSRC, voe_channel(), ssrc);
+  }
+}
+
+bool WebRtcVoiceMediaChannel::SetRtcpCName(const std::string& cname) {
+  if (engine()->voe()->rtp()->SetRTCP_CNAME(voe_channel(),
+                                                    cname.c_str()) == -1) {
+     LOG_RTCERR2(SetRTCP_CNAME, voe_channel(), cname);
+     return false;
+  }
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::Mute(bool muted) {
+  if (engine()->voe()->volume()->SetInputMute(voe_channel(),
+      muted) == -1) {
+    LOG_RTCERR2(SetInputMute, voe_channel(), muted);
+    return false;
+  }
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
+  // In VoiceEngine 3.5, GetRTCPStatistics will return 0 even when it fails,
+  // causing the stats to contain garbage information. To prevent this, we
+  // zero the stats structure before calling this API.
+  // TODO: Remove this workaround.
+  webrtc::CallStatistics cs;
+  unsigned int ssrc;
+  webrtc::CodecInst codec;
+  unsigned int level;
+
+  // Fill in the sender info, based on what we know, and what the
+  // remote side told us it got from its RTCP report.
+  VoiceSenderInfo sinfo;
+  memset(&sinfo, 0, sizeof(sinfo));
+
+  // Data we obtain locally.
+  memset(&cs, 0, sizeof(cs));
+  if (engine()->voe()->rtp()->GetRTCPStatistics(voe_channel(), cs) == -1 ||
+      engine()->voe()->rtp()->GetLocalSSRC(voe_channel(), ssrc) == -1) {
+    return false;
+  }
+
+  sinfo.ssrc = ssrc;
+  sinfo.bytes_sent = cs.bytesSent;
+  sinfo.packets_sent = cs.packetsSent;
+  // RTT isn't known until a RTCP report is received. Until then, VoiceEngine
+  // returns 0 to indicate an error value.
+  sinfo.rtt_ms = (cs.rttMs > 0) ? cs.rttMs : -1;
+
+  // Data from the last remote RTCP report.
+  unsigned int ntp_high, ntp_low, timestamp, ptimestamp, jitter;
+  unsigned short loss;  // NOLINT
+  if (engine()->voe()->rtp()->GetRemoteRTCPData(voe_channel(),
+          ntp_high, ntp_low, timestamp, ptimestamp, &jitter, &loss) != -1 &&
+      engine()->voe()->codec()->GetSendCodec(voe_channel(),
+          codec) != -1) {
+    // Convert Q8 to floating point.
+    sinfo.fraction_lost = static_cast<float>(loss) / (1 << 8);
+    // Convert samples to milliseconds.
+    if (codec.plfreq / 1000 > 0) {
+      sinfo.jitter_ms = jitter / (codec.plfreq / 1000);
+    }
+  } else {
+    sinfo.fraction_lost = -1;
+    sinfo.jitter_ms = -1;
+  }
+  // TODO: Figure out how to get remote packets_lost, ext_seqnum
+  sinfo.packets_lost = -1;
+  sinfo.ext_seqnum = -1;
+
+  // Local speech level.
+  sinfo.audio_level = (engine()->voe()->volume()->
+      GetSpeechInputLevelFullRange(level) != -1) ? level : -1;
+  info->senders.push_back(sinfo);
+
+  // Build the list of receivers, one for each mux channel, or 1 in a 1:1 call.
+  std::vector<int> channels;
+  for (ChannelMap::const_iterator it = mux_channels_.begin();
+       it != mux_channels_.end(); ++it) {
+    channels.push_back(it->second);
+  }
+  if (channels.empty()) {
+    channels.push_back(voe_channel());
+  }
+
+  // Get the SSRC and stats for each receiver, based on our own calculations.
+  for (std::vector<int>::const_iterator it = channels.begin();
+       it != channels.end(); ++it) {
+    memset(&cs, 0, sizeof(cs));
+    if (engine()->voe()->rtp()->GetRemoteSSRC(*it, ssrc) != -1 &&
+        engine()->voe()->rtp()->GetRTCPStatistics(*it, cs) != -1 &&
+        engine()->voe()->codec()->GetRecCodec(*it, codec) != -1) {
+      VoiceReceiverInfo rinfo;
+      memset(&rinfo, 0, sizeof(rinfo));
+      rinfo.ssrc = ssrc;
+      rinfo.bytes_rcvd = cs.bytesReceived;
+      rinfo.packets_rcvd = cs.packetsReceived;
+      // The next four fields are from the most recently sent RTCP report.
+      // Convert Q8 to floating point.
+      rinfo.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
+      rinfo.packets_lost = cs.cumulativeLost;
+      rinfo.ext_seqnum = cs.extendedMax;
+      // Convert samples to milliseconds.
+      if (codec.plfreq / 1000 > 0) {
+        rinfo.jitter_ms = cs.jitterSamples / (codec.plfreq / 1000);
+      }
+
+      // Get jitter buffer and total delay (alg + jitter + playout) stats.
+      webrtc::NetworkStatistics ns;
+      if (engine()->voe()->neteq() &&
+          engine()->voe()->neteq()->GetNetworkStatistics(
+              *it, ns) != -1) {
+        rinfo.jitter_buffer_ms = ns.currentBufferSize;
+        rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
+      }
+      if (engine()->voe()->sync()) {
+        engine()->voe()->sync()->GetDelayEstimate(*it,
+            rinfo.delay_estimate_ms);
+      }
+
+      // Get speech level.
+      rinfo.audio_level = (engine()->voe()->volume()->
+          GetSpeechOutputLevelFullRange(*it, level) != -1) ? level : -1;
+      info->receivers.push_back(rinfo);
+    }
+  }
+
+  return true;
+}
+
+void WebRtcVoiceMediaChannel::GetLastMediaError(
+    uint32* ssrc, VoiceMediaChannel::Error* error) {
+  ASSERT(ssrc != NULL);
+  ASSERT(error != NULL);
+  FindSsrc(voe_channel(), ssrc);
+  *error = WebRtcErrorToChannelError(GetLastEngineError());
+}
+
+bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
+  talk_base::CritScope lock(&mux_channels_cs_);
+  ASSERT(ssrc != NULL);
+  if (channel_num == voe_channel()) {
+    unsigned local_ssrc = 0;
+    // This is a sending channel.
+    if (engine()->voe()->rtp()->GetLocalSSRC(
+        channel_num, local_ssrc) != -1) {
+      *ssrc = local_ssrc;
+    }
+    return true;
+  } else if (channel_num == -1 && send_ != SEND_NOTHING) {
+    // Sometimes the VoiceEngine core will throw an error with channel_num = -1.
+    // This means the error is not limited to a specific channel.  Signal the
+    // message using ssrc=0.  If the current channel is sending, use this
+    // channel for sending the message.
+    *ssrc = 0;
+    return true;
+  } else {
+    // Check whether this is a receiving channel.
+    for (ChannelMap::const_iterator it = mux_channels_.begin();
+        it != mux_channels_.end(); ++it) {
+      if (it->second == channel_num) {
+        *ssrc = it->first;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+void WebRtcVoiceMediaChannel::OnError(uint32 ssrc, int error) {
+  SignalMediaError(ssrc, WebRtcErrorToChannelError(error));
+}
+
+int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
+  unsigned int ulevel;
+  int ret =
+      engine()->voe()->volume()->GetSpeechOutputLevel(channel, ulevel);
+  return (ret == 0) ? static_cast<int>(ulevel) : -1;
+}
+
+int WebRtcVoiceMediaChannel::GetChannel(uint32 ssrc) {
+  ChannelMap::iterator it = mux_channels_.find(ssrc);
+  return (it != mux_channels_.end()) ? it->second : -1;
+}
+
+bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
+    const std::vector<AudioCodec>& all_codecs, webrtc::CodecInst* send_codec) {
+  // Get the RED encodings from the parameter with no name. This may
+  // change based on what is discussed on the Jingle list.
+  // The encoding parameter is of the form "a/b"; we only support where
+  // a == b. Verify this and parse out the value into red_pt.
+  // If the parameter value is absent (as it will be until we wire up the
+  // signaling of this message), use the second codec specified (i.e. the
+  // one after "red") as the encoding parameter.
+  int red_pt = -1;
+  std::string red_params;
+  CodecParameterMap::const_iterator it = red_codec.params.find("");
+  if (it != red_codec.params.end()) {
+    red_params = it->second;
+    std::vector<std::string> red_pts;
+    if (talk_base::split(red_params, '/', &red_pts) != 2 ||
+        red_pts[0] != red_pts[1] ||
+        !talk_base::FromString(red_pts[0], &red_pt)) {
+      LOG(LS_WARNING) << "RED params " << red_params << " not supported.";
+      return false;
+    }
+  } else if (red_codec.params.empty()) {
+    LOG(LS_WARNING) << "RED params not present, using defaults";
+    if (all_codecs.size() > 1) {
+      red_pt = all_codecs[1].id;
+    }
+  }
+
+  // Try to find red_pt in |codecs|.
+  std::vector<AudioCodec>::const_iterator codec;
+  for (codec = all_codecs.begin(); codec != all_codecs.end(); ++codec) {
+    if (codec->id == red_pt)
+      break;
+  }
+
+  // If we find the right codec, that will be the codec we pass to
+  // SetSendCodec, with the desired payload type.
+  if (codec != all_codecs.end() &&
+    engine()->FindWebRtcCodec(*codec, send_codec)) {
+    send_codec->pltype = red_pt;
+  } else {
+    LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
+    return false;
+  }
+
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::EnableRtcp(int channel) {
+  if (engine()->voe()->rtp()->SetRTCPStatus(channel, true) == -1) {
+    LOG_RTCERR2(SetRTCPStatus, voe_channel(), 1);
+    return false;
+  }
+  // TODO: Enable VQMon and RTCP XR reports, once we know what
+  // we want to do with them.
+  // engine()->voe().EnableVQMon(voe_channel(), true);
+  // engine()->voe().EnableRTCP_XR(voe_channel(), true);
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
+  if (playout) {
+    LOG(LS_INFO) << "Starting playout for channel #" << channel;
+    if (engine()->voe()->base()->StartPlayout(channel) == -1) {
+      LOG_RTCERR1(StartPlayout, channel);
+      return false;
+    }
+  } else {
+    LOG(LS_INFO) << "Stopping playout for channel #" << channel;
+    engine()->voe()->base()->StopPlayout(channel);
+  }
+  return true;
+}
+
+uint32 WebRtcVoiceMediaChannel::ParseSsrc(const void* data, size_t len,
+                                        bool rtcp) {
+  size_t ssrc_pos = (!rtcp) ? 8 : 4;
+  uint32 ssrc = 0;
+  if (len >= (ssrc_pos + sizeof(ssrc))) {
+    ssrc = talk_base::GetBE32(static_cast<const char*>(data) + ssrc_pos);
+  }
+  return ssrc;
+}
+
+// Convert VoiceEngine error code into VoiceMediaChannel::Error enum.
+VoiceMediaChannel::Error
+    WebRtcVoiceMediaChannel::WebRtcErrorToChannelError(int err_code) {
+  switch (err_code) {
+    case 0:
+      return ERROR_NONE;
+    case VE_CANNOT_START_RECORDING:
+    case VE_MIC_VOL_ERROR:
+    case VE_GET_MIC_VOL_ERROR:
+    case VE_CANNOT_ACCESS_MIC_VOL:
+      return ERROR_REC_DEVICE_OPEN_FAILED;
+    case VE_SATURATION_WARNING:
+      return ERROR_REC_DEVICE_SATURATION;
+    case VE_REC_DEVICE_REMOVED:
+      return ERROR_REC_DEVICE_REMOVED;
+    case VE_RUNTIME_REC_WARNING:
+    case VE_RUNTIME_REC_ERROR:
+      return ERROR_REC_RUNTIME_ERROR;
+    case VE_CANNOT_START_PLAYOUT:
+    case VE_SPEAKER_VOL_ERROR:
+    case VE_GET_SPEAKER_VOL_ERROR:
+    case VE_CANNOT_ACCESS_SPEAKER_VOL:
+      return ERROR_PLAY_DEVICE_OPEN_FAILED;
+    case VE_RUNTIME_PLAY_WARNING:
+    case VE_RUNTIME_PLAY_ERROR:
+      return ERROR_PLAY_RUNTIME_ERROR;
+    case VE_TYPING_NOISE_WARNING:
+      return ERROR_REC_TYPING_NOISE_DETECTED;
+    default:
+      return VoiceMediaChannel::ERROR_OTHER;
+  }
+}
+
+int WebRtcSoundclipStream::Read(void *buf, int len) {
+  size_t res = 0;
+  mem_.Read(buf, len, &res, NULL);
+  return res;
+}
+
+int WebRtcSoundclipStream::Rewind() {
+  mem_.Rewind();
+  // Return -1 when not looping, to keep VoiceEngine from replaying the stream.
+  return (loop_) ? 0 : -1;
+}
+
+}  // namespace cricket
+
+#endif  // HAVE_WEBRTC_VOICE
diff --git a/talk/session/phone/webrtcvoiceengine.h b/talk/session/phone/webrtcvoiceengine.h
new file mode 100644
index 0000000..8234b9b
--- /dev/null
+++ b/talk/session/phone/webrtcvoiceengine.h
@@ -0,0 +1,323 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
+#define TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "talk/base/buffer.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/logging.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stream.h"
+#include "talk/session/phone/channel.h"
+#include "talk/session/phone/rtputils.h"
+#include "talk/session/phone/webrtccommon.h"
+
+namespace cricket {
+
+// WebRtcSoundclipStream is an adapter object that allows a memory stream to be
+// passed into WebRtc, and support looping.
+class WebRtcSoundclipStream : public webrtc::InStream {
+ public:
+  WebRtcSoundclipStream(const char* buf, size_t len)
+      : mem_(buf, len), loop_(true) {
+  }
+  void set_loop(bool loop) { loop_ = loop; }
+  virtual int Read(void* buf, int len);
+  virtual int Rewind();
+
+ private:
+  talk_base::MemoryStream mem_;
+  bool loop_;
+};
+
+// WebRtcMonitorStream is used to monitor a stream coming from WebRtc.
+// For now we just dump the data.
+class WebRtcMonitorStream : public webrtc::OutStream {
+  virtual bool Write(const void *buf, int len) {
+    return true;
+  }
+};
+
+class AudioDeviceModule;
+class VoETraceWrapper;
+class VoEWrapper;
+class WebRtcSoundclipMedia;
+class WebRtcVoiceMediaChannel;
+
+// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
+// It uses the WebRtc VoiceEngine library for audio handling.
+class WebRtcVoiceEngine
+    : public webrtc::VoiceEngineObserver,
+      public webrtc::TraceCallback {
+ public:
+  WebRtcVoiceEngine();
+  WebRtcVoiceEngine(webrtc::AudioDeviceModule* adm,
+                    webrtc::AudioDeviceModule* adm_sc);
+  // Dependency injection for testing.
+  WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
+                    VoEWrapper* voe_wrapper_sc,
+                    VoETraceWrapper* tracing);
+  ~WebRtcVoiceEngine();
+  bool Init();
+  void Terminate();
+
+  int GetCapabilities();
+  VoiceMediaChannel* CreateChannel();
+
+  SoundclipMedia* CreateSoundclip();
+
+  bool SetOptions(int options);
+  bool SetDevices(const Device* in_device, const Device* out_device);
+  bool GetOutputVolume(int* level);
+  bool SetOutputVolume(int level);
+  int GetInputLevel();
+  bool SetLocalMonitor(bool enable);
+
+  const std::vector<AudioCodec>& codecs();
+  bool FindCodec(const AudioCodec& codec);
+  bool FindWebRtcCodec(const AudioCodec& codec, webrtc::CodecInst* gcodec);
+
+  void SetLogging(int min_sev, const char* filter);
+
+  // For tracking WebRtc channels. Needed because we have to pause them
+  // all when switching devices.
+  // May only be called by WebRtcVoiceMediaChannel.
+  void RegisterChannel(WebRtcVoiceMediaChannel *channel);
+  void UnregisterChannel(WebRtcVoiceMediaChannel *channel);
+
+  // May only be called by WebRtcSoundclipMedia.
+  void RegisterSoundclip(WebRtcSoundclipMedia *channel);
+  void UnregisterSoundclip(WebRtcSoundclipMedia *channel);
+
+  // Called by WebRtcVoiceMediaChannel to set a gain offset from
+  // the default AGC target level.
+  bool AdjustAgcLevel(int delta);
+
+  // Called by WebRtcVoiceMediaChannel to configure echo cancellation
+  // and noise suppression modes.
+  bool SetConferenceMode(bool enable);
+
+  VoEWrapper* voe() { return voe_wrapper_.get(); }
+  VoEWrapper* voe_sc() { return voe_wrapper_sc_.get(); }
+  int GetLastEngineError();
+
+ private:
+  typedef std::vector<WebRtcSoundclipMedia *> SoundclipList;
+  typedef std::vector<WebRtcVoiceMediaChannel *> ChannelList;
+
+  struct CodecPref {
+    const char* name;
+    int clockrate;
+  };
+
+  void Construct();
+  bool InitInternal();
+  void ApplyLogging(const std::string& log_filter);
+  virtual void Print(const webrtc::TraceLevel level,
+                     const char* trace_string, const int length);
+  virtual void CallbackOnError(const int channel, const int errCode);
+  static int GetCodecPreference(const char *name, int clockrate);
+  // Given the device type, name, and id, find device id. Return true and
+  // set the output parameter rtc_id if successful.
+  bool FindWebRtcAudioDeviceId(
+      bool is_input, const std::string& dev_name, int dev_id, int* rtc_id);
+  bool FindChannelAndSsrc(int channel_num,
+                          WebRtcVoiceMediaChannel** channel,
+                          uint32* ssrc) const;
+  bool ChangeLocalMonitor(bool enable);
+  bool PauseLocalMonitor();
+  bool ResumeLocalMonitor();
+
+  static const int kDefaultLogSeverity = talk_base::LS_WARNING;
+  static const CodecPref kCodecPrefs[];
+
+  // The primary instance of WebRtc VoiceEngine.
+  talk_base::scoped_ptr<VoEWrapper> voe_wrapper_;
+  // A secondary instance, for playing out soundclips (on the 'ring' device).
+  talk_base::scoped_ptr<VoEWrapper> voe_wrapper_sc_;
+  talk_base::scoped_ptr<VoETraceWrapper> tracing_;
+  // The external audio device manager
+  webrtc::AudioDeviceModule* adm_;
+  webrtc::AudioDeviceModule* adm_sc_;
+  int log_level_;
+  std::string log_filter_;
+  bool is_dumping_aec_;
+  std::vector<AudioCodec> codecs_;
+  bool desired_local_monitor_enable_;
+  talk_base::scoped_ptr<WebRtcMonitorStream> monitor_;
+  SoundclipList soundclips_;
+  ChannelList channels_;
+  // channels_ can be read from WebRtc callback thread. We need a lock on that
+  // callback as well as the RegisterChannel/UnregisterChannel.
+  talk_base::CriticalSection channels_cs_;
+  webrtc::AgcConfig default_agc_config_;
+  bool initialized_;
+};
+
+// WebRtcMediaChannel is a class that implements the common WebRtc channel
+// functionality.
+template <class T, class E>
+class WebRtcMediaChannel : public T, public webrtc::Transport {
+ public:
+  WebRtcMediaChannel(E *engine, int channel)
+      : engine_(engine), voe_channel_(channel), sequence_number_(-1) {}
+  E *engine() { return engine_; }
+  int voe_channel() const { return voe_channel_; }
+  bool valid() const { return voe_channel_ != -1; }
+
+ protected:
+  // implements Transport interface
+  virtual int SendPacket(int channel, const void *data, int len) {
+    if (!T::network_interface_) {
+      return -1;
+    }
+
+    // We need to store the sequence number to be able to pick up
+    // the same sequence when the device is restarted.
+    // TODO: Remove when WebRtc has fixed the problem.
+    int seq_num;
+    if (!GetRtpSeqNum(data, len, &seq_num)) {
+      return -1;
+    }
+    if (sequence_number() == -1) {
+      LOG(INFO) << "WebRtcVoiceMediaChannel sends first packet seqnum="
+                << seq_num;
+    }
+    sequence_number_ = seq_num;
+
+    talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+    return T::network_interface_->SendPacket(&packet) ? len : -1;
+  }
+  virtual int SendRTCPPacket(int channel, const void *data, int len) {
+    if (!T::network_interface_) {
+      return -1;
+    }
+
+    talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+    return T::network_interface_->SendRtcp(&packet) ? len : -1;
+  }
+  int sequence_number() const {
+    return sequence_number_;
+  }
+
+ private:
+  E *engine_;
+  int voe_channel_;
+  int sequence_number_;
+};
+
+// WebRtcVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
+// WebRtc Voice Engine.
+class WebRtcVoiceMediaChannel
+    : public WebRtcMediaChannel<VoiceMediaChannel,
+                                WebRtcVoiceEngine> {
+ public:
+  explicit WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine);
+  virtual ~WebRtcVoiceMediaChannel();
+  virtual bool SetOptions(int options);
+  virtual bool SetRecvCodecs(const std::vector<AudioCodec> &codecs);
+  virtual bool SetSendCodecs(const std::vector<AudioCodec> &codecs);
+  virtual bool SetRecvRtpHeaderExtensions(
+      const std::vector<RtpHeaderExtension>& extensions);
+  virtual bool SetSendRtpHeaderExtensions(
+      const std::vector<RtpHeaderExtension>& extensions);
+  virtual bool SetPlayout(bool playout);
+  bool PausePlayout();
+  bool ResumePlayout();
+  virtual bool SetSend(SendFlags send);
+  bool PauseSend();
+  bool ResumeSend();
+  virtual bool AddStream(uint32 ssrc);
+  virtual bool RemoveStream(uint32 ssrc);
+  virtual bool GetActiveStreams(AudioInfo::StreamList* actives);
+  virtual int GetOutputLevel();
+  virtual bool SetOutputScaling(uint32 ssrc, double left, double right);
+  virtual bool GetOutputScaling(uint32 ssrc, double* left, double* right);
+
+  virtual bool SetRingbackTone(const char *buf, int len);
+  virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop);
+  virtual bool PressDTMF(int event, bool playout);
+
+  virtual void OnPacketReceived(talk_base::Buffer* packet);
+  virtual void OnRtcpReceived(talk_base::Buffer* packet);
+  virtual void SetSendSsrc(uint32 id);
+  virtual bool SetRtcpCName(const std::string& cname);
+  virtual bool Mute(bool mute);
+  virtual bool SetSendBandwidth(bool autobw, int bps) { return false; }
+  virtual bool GetStats(VoiceMediaInfo* info);
+  // Gets last reported error from WebRtc voice engine.  This should be only
+  // called in response to a failure.
+  virtual void GetLastMediaError(uint32* ssrc,
+                                 VoiceMediaChannel::Error* error);
+  bool FindSsrc(int channel_num, uint32* ssrc);
+  void OnError(uint32 ssrc, int error);
+
+ protected:
+  int GetLastEngineError() { return engine()->GetLastEngineError(); }
+  int GetChannel(uint32 ssrc);
+  int GetOutputLevel(int channel);
+  bool GetRedSendCodec(const AudioCodec& red_codec,
+                       const std::vector<AudioCodec>& all_codecs,
+                       webrtc::CodecInst* send_codec);
+  bool EnableRtcp(int channel);
+  bool SetPlayout(int channel, bool playout);
+  static uint32 ParseSsrc(const void* data, size_t len, bool rtcp);
+  static Error WebRtcErrorToChannelError(int err_code);
+
+ private:
+  // Tandberg-bridged conferences require a -10dB gain adjustment,
+  // which is actually +10 in AgcConfig.targetLeveldBOv
+  static const int kTandbergDbAdjustment = 10;
+
+  bool ChangePlayout(bool playout);
+  bool ChangeSend(SendFlags send);
+
+  typedef std::map<uint32, int> ChannelMap;
+  talk_base::scoped_ptr<WebRtcSoundclipStream> ringback_tone_;
+  std::set<int> ringback_channels_;  // channels playing ringback
+  int channel_options_;
+  bool agc_adjusted_;
+  bool dtmf_allowed_;
+  bool desired_playout_;
+  bool playout_;
+  SendFlags desired_send_;
+  SendFlags send_;
+  ChannelMap mux_channels_;  // for multiple sources
+  // mux_channels_ can be read from WebRtc callback thread.  Accesses off the
+  // WebRtc thread must be synchronized with edits on the worker thread.  Reads
+  // on the worker thread are ok.
+  mutable talk_base::CriticalSection mux_channels_cs_;
+};
+}
+
+#endif  // TALK_SESSION_PHONE_WEBRTCVOICEENGINE_H_
diff --git a/talk/session/phone/webrtcvoiceengine_unittest.cc b/talk/session/phone/webrtcvoiceengine_unittest.cc
new file mode 100644
index 0000000..0660630
--- /dev/null
+++ b/talk/session/phone/webrtcvoiceengine_unittest.cc
@@ -0,0 +1,1148 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Author: Justin Uberti (juberti@google.com)
+
+#include "talk/base/byteorder.h"
+#include "talk/base/gunit.h"
+#include "talk/session/phone/channel.h"
+#include "talk/session/phone/fakemediaengine.h"
+#include "talk/session/phone/fakertp.h"
+#include "talk/session/phone/fakesession.h"
+#include "talk/session/phone/fakewebrtcvoiceengine.h"
+#include "talk/session/phone/webrtcvoiceengine.h"
+
+// Tests for the WebRtcVoiceEngine/VoiceChannel code.
+
+static const cricket::AudioCodec kPcmuCodec(0, "PCMU", 8000, 64000, 1, 0);
+static const cricket::AudioCodec kIsacCodec(103, "ISAC", 16000, -1, 1, 0);
+static const cricket::AudioCodec kRedCodec(117, "red", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCn8000Codec(13, "CN", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCn16000Codec(105, "CN", 16000, 0, 1, 0);
+static const cricket::AudioCodec
+    kTelephoneEventCodec(106, "telephone-event", 8000, 0, 1, 0);
+static const cricket::AudioCodec* const kAudioCodecs[] = {
+    &kPcmuCodec, &kIsacCodec, &kRedCodec, &kCn8000Codec, &kCn16000Codec,
+    &kTelephoneEventCodec,
+};
+const char kRingbackTone[] = "RIFF____WAVE____ABCD1234";
+
+class FakeVoEWrapper : public cricket::VoEWrapper {
+ public:
+  explicit FakeVoEWrapper(cricket::FakeWebRtcVoiceEngine* engine)
+      : cricket::VoEWrapper(engine,  // processing
+                            engine,  // base
+                            engine,  // codec
+                            engine,  // dtmf
+                            engine,  // file
+                            engine,  // hw
+                            engine,  // media
+                            engine,  // neteq
+                            engine,  // network
+                            engine,  // rtp
+                            engine,  // sync
+                            engine) {  // volume
+  }
+};
+
+class NullVoETraceWrapper : public cricket::VoETraceWrapper {
+ public:
+  virtual int SetTraceFilter(const unsigned int filter) {
+    return 0;
+  }
+  virtual int SetTraceFile(const char* fileNameUTF8) {
+    return 0;
+  }
+  virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
+    return 0;
+  }
+};
+
+class WebRtcVoiceEngineTest : public testing::Test {
+ public:
+  class ChannelErrorListener : public sigslot::has_slots<> {
+   public:
+    explicit ChannelErrorListener(cricket::VoiceMediaChannel* channel)
+        : ssrc_(0), error_(cricket::VoiceMediaChannel::ERROR_NONE) {
+      ASSERT(channel != NULL);
+      channel->SignalMediaError.connect(
+          this, &ChannelErrorListener::OnVoiceChannelError);
+    }
+    void OnVoiceChannelError(uint32 ssrc,
+                             cricket::VoiceMediaChannel::Error error) {
+      ssrc_ = ssrc;
+      error_ = error;
+    }
+    void Reset() {
+      ssrc_ = 0;
+      error_ = cricket::VoiceMediaChannel::ERROR_NONE;
+    }
+    uint32 ssrc() const {
+      return ssrc_;
+    }
+    cricket::VoiceMediaChannel::Error error() const {
+      return error_;
+    }
+
+   private:
+    uint32 ssrc_;
+    cricket::VoiceMediaChannel::Error error_;
+  };
+  // TODO: Implement other stub interfaces (VQE)
+  WebRtcVoiceEngineTest()
+      : voe_(kAudioCodecs, ARRAY_SIZE(kAudioCodecs)),
+        voe_sc_(kAudioCodecs, ARRAY_SIZE(kAudioCodecs)),
+        engine_(new FakeVoEWrapper(&voe_),
+                new FakeVoEWrapper(&voe_sc_),
+                new NullVoETraceWrapper()),
+        channel_(NULL), soundclip_(NULL) {
+  }
+  bool SetupEngine() {
+    bool result = engine_.Init();
+    if (result) {
+      channel_ = engine_.CreateChannel();
+      result = (channel_ != NULL);
+    }
+    return result;
+  }
+  void DeliverPacket(const void* data, int len) {
+    talk_base::Buffer packet(data, len);
+    channel_->OnPacketReceived(&packet);
+  }
+  virtual void TearDown() {
+    delete soundclip_;
+    delete channel_;
+    engine_.Terminate();
+  }
+
+ protected:
+  cricket::FakeWebRtcVoiceEngine voe_;
+  cricket::FakeWebRtcVoiceEngine voe_sc_;
+  cricket::WebRtcVoiceEngine engine_;
+  cricket::VoiceMediaChannel* channel_;
+  cricket::SoundclipMedia* soundclip_;
+};
+
+// Tests that our stub library "works".
+TEST_F(WebRtcVoiceEngineTest, StartupShutdown) {
+  EXPECT_FALSE(voe_.IsInited());
+  EXPECT_FALSE(voe_sc_.IsInited());
+  EXPECT_TRUE(engine_.Init());
+  EXPECT_TRUE(voe_.IsInited());
+  EXPECT_TRUE(voe_sc_.IsInited());
+  engine_.Terminate();
+  EXPECT_FALSE(voe_.IsInited());
+  EXPECT_FALSE(voe_sc_.IsInited());
+}
+
+// Tests that we can create and destroy a channel.
+TEST_F(WebRtcVoiceEngineTest, CreateChannel) {
+  EXPECT_TRUE(engine_.Init());
+  channel_ = engine_.CreateChannel();
+  EXPECT_TRUE(channel_ != NULL);
+}
+
+// Tests that we properly handle failures in CreateChannel.
+TEST_F(WebRtcVoiceEngineTest, CreateChannelFail) {
+  voe_.set_fail_create_channel(true);
+  EXPECT_TRUE(engine_.Init());
+  channel_ = engine_.CreateChannel();
+  EXPECT_TRUE(channel_ == NULL);
+}
+
+// Tests that we can find codecs by name or id, and that we interpret the
+// clockrate and bitrate fields properly.
+TEST_F(WebRtcVoiceEngineTest, FindCodec) {
+  cricket::AudioCodec codec;
+  webrtc::CodecInst codec_inst;
+  // Find PCMU with explicit clockrate and bitrate.
+  EXPECT_TRUE(engine_.FindWebRtcCodec(kPcmuCodec, &codec_inst));
+  // Find ISAC with explicit clockrate and 0 bitrate.
+  EXPECT_TRUE(engine_.FindWebRtcCodec(kIsacCodec, &codec_inst));
+  // Find telephone-event with explicit clockrate and 0 bitrate.
+  EXPECT_TRUE(engine_.FindWebRtcCodec(kTelephoneEventCodec, &codec_inst));
+  // Find ISAC with a different payload id.
+  codec = kIsacCodec;
+  codec.id = 127;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+  // Find PCMU with a 0 clockrate.
+  codec = kPcmuCodec;
+  codec.clockrate = 0;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+  EXPECT_EQ(8000, codec_inst.plfreq);
+  // Find PCMU with a 0 bitrate.
+  codec = kPcmuCodec;
+  codec.bitrate = 0;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+  EXPECT_EQ(64000, codec_inst.rate);
+  // Find ISAC with an explicit bitrate.
+  codec = kIsacCodec;
+  codec.bitrate = 32000;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+  EXPECT_EQ(32000, codec_inst.rate);
+}
+
+// Test that we set our inbound codecs properly, including changing PT.
+TEST_F(WebRtcVoiceEngineTest, SetRecvCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 96;
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  talk_base::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "ISAC");
+  gcodec.plfreq = 16000;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+}
+
+// Test that we fail to set an unknown inbound codec.
+TEST_F(WebRtcVoiceEngineTest, SetRecvCodecsUnsupportedCodec) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(cricket::AudioCodec(127, "XYZ", 32000, 0, 1, 0));
+  EXPECT_FALSE(channel_->SetRecvCodecs(codecs));
+}
+
+// Test that we apply codecs properly.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kRedCodec);
+  codecs[0].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(voe_.GetVAD(channel_num));
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(105, voe_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we fall back to PCMU if no codecs are specified.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsNoCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(0, gcodec.pltype);
+  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(voe_.GetVAD(channel_num));
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(105, voe_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we set VAD and DTMF types correctly.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsCNandDTMF) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  // TODO: cn 32000
+  codecs.push_back(kCn16000Codec);
+  codecs.push_back(kCn8000Codec);
+  codecs.push_back(kTelephoneEventCodec);
+  codecs.push_back(kRedCodec);
+  codecs[0].id = 96;
+  codecs[2].id = 97;  // wideband CN
+  codecs[4].id = 98;  // DTMF
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(voe_.GetVAD(channel_num));
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we set up FEC correctly.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsRED) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[0].params[""] = "96/96";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(voe_.GetFEC(channel_num));
+  EXPECT_EQ(127, voe_.GetSendFECPayloadType(channel_num));
+}
+
+// Test that we set up FEC correctly if params are omitted.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsREDNoParams) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(voe_.GetFEC(channel_num));
+  EXPECT_EQ(127, voe_.GetSendFECPayloadType(channel_num));
+}
+
+// Test that we ignore RED if the parameters aren't named the way we expect.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsBadRED1) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[0].params["ABC"] = "96/96";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it uses different primary/secondary encoding.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsBadRED2) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[0].params[""] = "96/0";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it uses more than 2 encodings.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsBadRED3) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[0].params[""] = "96/96/96";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it has bogus codec ids.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsBadRED4) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[0].params[""] = "ABC/ABC";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it refers to a codec that is not present.
+TEST_F(WebRtcVoiceEngineTest, SetSendCodecsBadRED5) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[0].params[""] = "97/97";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we support setting an empty list of recv header extensions.
+TEST_F(WebRtcVoiceEngineTest, SetRecvRtpHeaderExtensions) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::RtpHeaderExtension> extensions;
+  int channel_num = voe_.GetLastChannel();
+  bool enable = false;
+  unsigned char id = 0;
+
+  // An empty list shouldn't cause audio-level headers to be enabled.
+  EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
+  EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+      channel_num, enable, id));
+  EXPECT_FALSE(enable);
+
+  // Nor should indicating we can receive the audio-level header.
+  extensions.push_back(cricket::RtpHeaderExtension(
+      "urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
+  EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
+  EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+      channel_num, enable, id));
+  EXPECT_FALSE(enable);
+}
+
+// Test that we support setting certain send header extensions.
+TEST_F(WebRtcVoiceEngineTest, SetSendRtpHeaderExtensions) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::RtpHeaderExtension> extensions;
+  int channel_num = voe_.GetLastChannel();
+  bool enable = false;
+  unsigned char id = 0;
+
+  // Ensure audio levels are off by default.
+  EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+      channel_num, enable, id));
+  EXPECT_FALSE(enable);
+
+  // Ensure audio levels stay off with an empty list of headers.
+  EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+  EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+      channel_num, enable, id));
+  EXPECT_FALSE(enable);
+
+  // Ensure audio levels are enabled if the audio-level header is specified.
+  extensions.push_back(cricket::RtpHeaderExtension(
+      "urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
+  EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+  EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+      channel_num, enable, id));
+  EXPECT_TRUE(enable);
+  EXPECT_EQ(8, id);
+
+  // Ensure audio levels go back off with an empty list.
+  extensions.clear();
+  EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+  EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+      channel_num, enable, id));
+  EXPECT_FALSE(enable);
+}
+
+// Test that we can create a channel and start sending/playing out on it.
+TEST_F(WebRtcVoiceEngineTest, SendAndPlayout) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+  EXPECT_TRUE(voe_.GetSend(channel_num));
+  EXPECT_TRUE(channel_->SetPlayout(true));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+  EXPECT_FALSE(voe_.GetSend(channel_num));
+  EXPECT_TRUE(channel_->SetPlayout(false));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can set the devices to use.
+TEST_F(WebRtcVoiceEngineTest, SetDevices) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+  cricket::Device default_dev(cricket::kFakeDefaultDeviceName,
+                              cricket::kFakeDefaultDeviceId);
+  cricket::Device dev(cricket::kFakeDeviceName,
+                      cricket::kFakeDeviceId);
+
+  // Test SetDevices() while not sending or playing.
+  EXPECT_TRUE(engine_.SetDevices(&default_dev, &default_dev));
+
+  // Test SetDevices() while sending and playing.
+  EXPECT_TRUE(engine_.SetLocalMonitor(true));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+  EXPECT_TRUE(channel_->SetPlayout(true));
+  EXPECT_TRUE(voe_.GetRecordingMicrophone());
+  EXPECT_TRUE(voe_.GetSend(channel_num));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+
+  EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+  EXPECT_TRUE(voe_.GetRecordingMicrophone());
+  EXPECT_TRUE(voe_.GetSend(channel_num));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+
+  // Test that failure to open newly selected devices does not prevent opening
+  // ones after that.
+  voe_.set_fail_start_recording_microphone(true);
+  voe_.set_playout_fail_channel(channel_num);
+  voe_.set_send_fail_channel(channel_num);
+
+  EXPECT_FALSE(engine_.SetDevices(&default_dev, &default_dev));
+
+  EXPECT_FALSE(voe_.GetRecordingMicrophone());
+  EXPECT_FALSE(voe_.GetSend(channel_num));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num));
+
+  voe_.set_fail_start_recording_microphone(false);
+  voe_.set_playout_fail_channel(-1);
+  voe_.set_send_fail_channel(-1);
+
+  EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+  EXPECT_TRUE(voe_.GetRecordingMicrophone());
+  EXPECT_TRUE(voe_.GetSend(channel_num));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can set the devices to use even if we failed to
+// open the initial ones.
+TEST_F(WebRtcVoiceEngineTest, SetDevicesWithInitiallyBadDevices) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+  cricket::Device default_dev(cricket::kFakeDefaultDeviceName,
+                              cricket::kFakeDefaultDeviceId);
+  cricket::Device dev(cricket::kFakeDeviceName,
+                      cricket::kFakeDeviceId);
+
+  // Test that failure to open devices selected before starting
+  // send/play does not prevent opening newly selected ones after that.
+  voe_.set_fail_start_recording_microphone(true);
+  voe_.set_playout_fail_channel(channel_num);
+  voe_.set_send_fail_channel(channel_num);
+
+  EXPECT_TRUE(engine_.SetDevices(&default_dev, &default_dev));
+
+  EXPECT_FALSE(engine_.SetLocalMonitor(true));
+  EXPECT_FALSE(channel_->SetSend(cricket::SEND_MICROPHONE));
+  EXPECT_FALSE(channel_->SetPlayout(true));
+  EXPECT_FALSE(voe_.GetRecordingMicrophone());
+  EXPECT_FALSE(voe_.GetSend(channel_num));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num));
+
+  voe_.set_fail_start_recording_microphone(false);
+  voe_.set_playout_fail_channel(-1);
+  voe_.set_send_fail_channel(-1);
+
+  EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+  EXPECT_TRUE(voe_.GetRecordingMicrophone());
+  EXPECT_TRUE(voe_.GetSend(channel_num));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can create a channel configured for multi-point conferences,
+// and start sending/playing out on it.
+TEST_F(WebRtcVoiceEngineTest, ConferenceSendAndPlayout) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  EXPECT_TRUE(channel_->SetOptions(cricket::OPT_CONFERENCE));
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+  EXPECT_TRUE(voe_.GetSend(channel_num));
+
+  bool enabled;
+  webrtc::EcModes ec_mode;
+  webrtc::NsModes ns_mode;
+  EXPECT_EQ(0, voe_.GetEcStatus(enabled, ec_mode));
+#ifdef CHROMEOS
+  EXPECT_EQ(webrtc::kEcDefault, ec_mode);
+#else
+  EXPECT_EQ(webrtc::kEcConference, ec_mode);
+#endif
+  EXPECT_EQ(0, voe_.GetNsStatus(enabled, ns_mode));
+  EXPECT_TRUE(enabled);
+#ifdef CHROMEOS
+  EXPECT_EQ(webrtc::kNsDefault, ns_mode);
+#else
+  EXPECT_EQ(webrtc::kNsConference, ns_mode);
+#endif
+
+  EXPECT_TRUE(channel_->SetPlayout(true));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+  EXPECT_FALSE(voe_.GetSend(channel_num));
+
+  EXPECT_EQ(0, voe_.GetEcStatus(enabled, ec_mode));
+  EXPECT_EQ(webrtc::kEcDefault, ec_mode);
+  EXPECT_EQ(0, voe_.GetNsStatus(enabled, ns_mode));
+  EXPECT_EQ(webrtc::kNsDefault, ns_mode);
+
+  EXPECT_TRUE(channel_->SetPlayout(false));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can create a channel configured for Codian bridges,
+// and start sending/playing out on it.
+TEST_F(WebRtcVoiceEngineTest, CodianSendAndPlayout) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  webrtc::AgcConfig agc_config;
+  EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
+  EXPECT_EQ(0, agc_config.targetLeveldBOv);
+  EXPECT_TRUE(channel_->SetOptions(cricket::OPT_AGC_TANDBERG_LEVELS));
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+  EXPECT_TRUE(voe_.GetSend(channel_num));
+  EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
+  EXPECT_GT(agc_config.targetLeveldBOv, 0);  // level was attenuated
+  EXPECT_TRUE(channel_->SetPlayout(true));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+  EXPECT_FALSE(voe_.GetSend(channel_num));
+  EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
+  EXPECT_EQ(0, agc_config.targetLeveldBOv);  // level was restored
+  EXPECT_TRUE(channel_->SetPlayout(false));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can set the outgoing SSRC properly.
+TEST_F(WebRtcVoiceEngineTest, SetSendSsrc) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  unsigned int send_ssrc;
+  EXPECT_EQ(0, voe_.GetLocalSSRC(channel_num, send_ssrc));
+  EXPECT_NE(0U, send_ssrc);
+  channel_->SetSendSsrc(0x99);
+  EXPECT_EQ(0, voe_.GetLocalSSRC(channel_num, send_ssrc));
+  EXPECT_EQ(0x99U, send_ssrc);
+}
+
+// Test that we can properly receive packets.
+TEST_F(WebRtcVoiceEngineTest, Recv) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+  EXPECT_TRUE(voe_.CheckPacket(channel_num, kPcmuFrame,
+                                      sizeof(kPcmuFrame)));
+}
+
+// Test that we can add and remove streams, and do proper send/playout.
+// We can receive on multiple streams, but will only send on one.
+TEST_F(WebRtcVoiceEngineTest, SendAndPlayoutWithMultipleStreams) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num1 = voe_.GetLastChannel();
+
+  // Start playout on the default channel.
+  EXPECT_TRUE(channel_->SetPlayout(true));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num1));
+
+  // Adding another stream should disable playout on the default channel.
+  EXPECT_TRUE(channel_->AddStream(2));
+  int channel_num2 = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+  EXPECT_TRUE(voe_.GetSend(channel_num1));
+  EXPECT_FALSE(voe_.GetSend(channel_num2));
+
+  // Make sure only the new channel is played out.
+  EXPECT_FALSE(voe_.GetPlayout(channel_num1));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num2));
+
+  // Adding yet another stream should have streams 2 and 3 enabled for playout.
+  EXPECT_TRUE(channel_->AddStream(3));
+  int channel_num3 = voe_.GetLastChannel();
+  EXPECT_FALSE(voe_.GetPlayout(channel_num1));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num2));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num3));
+  EXPECT_FALSE(voe_.GetSend(channel_num3));
+
+  // Stop sending.
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+  EXPECT_FALSE(voe_.GetSend(channel_num1));
+  EXPECT_FALSE(voe_.GetSend(channel_num2));
+  EXPECT_FALSE(voe_.GetSend(channel_num3));
+
+  // Stop playout.
+  EXPECT_TRUE(channel_->SetPlayout(false));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num1));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num2));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num3));
+
+  // Restart playout and make sure the default channel still is not played out.
+  EXPECT_TRUE(channel_->SetPlayout(true));
+  EXPECT_FALSE(voe_.GetPlayout(channel_num1));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num2));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num3));
+
+  // Now remove the new streams and verify that the default channel is
+  // played out again.
+  EXPECT_TRUE(channel_->RemoveStream(3));
+  EXPECT_TRUE(channel_->RemoveStream(2));
+
+  EXPECT_TRUE(voe_.GetPlayout(channel_num1));
+}
+
+// Test that we can set the outgoing SSRC properly with multiple streams.
+TEST_F(WebRtcVoiceEngineTest, SetSendSsrcWithMultipleStreams) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num1 = voe_.GetLastChannel();
+  unsigned int send_ssrc;
+  channel_->SetSendSsrc(0x99);
+  EXPECT_EQ(0, voe_.GetLocalSSRC(channel_num1, send_ssrc));
+  EXPECT_EQ(0x99U, send_ssrc);
+  EXPECT_TRUE(channel_->AddStream(2));
+  int channel_num2 = voe_.GetLastChannel();
+  EXPECT_EQ(0, voe_.GetLocalSSRC(channel_num2, send_ssrc));
+  EXPECT_EQ(0x99U, send_ssrc);
+}
+
+// Test that we properly handle failures to add a stream.
+TEST_F(WebRtcVoiceEngineTest, AddStreamFail) {
+  EXPECT_TRUE(SetupEngine());
+  voe_.set_fail_create_channel(true);
+  EXPECT_FALSE(channel_->AddStream(2));
+}
+
+// Test that we can properly receive packets on multiple streams.
+TEST_F(WebRtcVoiceEngineTest, RecvWithMultipleStreams) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->AddStream(1));
+  int channel_num1 = voe_.GetLastChannel();
+  EXPECT_TRUE(channel_->AddStream(2));
+  int channel_num2 = voe_.GetLastChannel();
+  EXPECT_TRUE(channel_->AddStream(3));
+  int channel_num3 = voe_.GetLastChannel();
+  // Create packets with the right SSRCs.
+  char packets[4][sizeof(kPcmuFrame)];
+  for (size_t i = 0; i < ARRAY_SIZE(packets); ++i) {
+    memcpy(packets[i], kPcmuFrame, sizeof(kPcmuFrame));
+    talk_base::SetBE32(packets[i] + 8, i);
+  }
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+  DeliverPacket(packets[0], sizeof(packets[0]));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+  DeliverPacket(packets[1], sizeof(packets[1]));
+  EXPECT_TRUE(voe_.CheckPacket(channel_num1, packets[1],
+                                      sizeof(packets[1])));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+  DeliverPacket(packets[2], sizeof(packets[2]));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
+  EXPECT_TRUE(voe_.CheckPacket(channel_num2, packets[2],
+                                      sizeof(packets[2])));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+  DeliverPacket(packets[3], sizeof(packets[3]));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
+  EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
+  EXPECT_TRUE(voe_.CheckPacket(channel_num3, packets[3],
+                                      sizeof(packets[3])));
+  EXPECT_TRUE(channel_->RemoveStream(3));
+  EXPECT_TRUE(channel_->RemoveStream(2));
+  EXPECT_TRUE(channel_->RemoveStream(1));
+}
+
+// Test that we properly clean up any streams that were added, even if
+// not explicitly removed.
+TEST_F(WebRtcVoiceEngineTest, StreamCleanup) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->AddStream(1));
+  EXPECT_TRUE(channel_->AddStream(2));
+  EXPECT_EQ(3, voe_.GetNumChannels());  // default channel + 2 added
+  delete channel_;
+  channel_ = NULL;
+  EXPECT_EQ(0, voe_.GetNumChannels());
+}
+
+// Test that we can send DTMF properly, but only if the other side supports
+// telephone-event.
+TEST_F(WebRtcVoiceEngineTest, SendDtmf) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+  EXPECT_FALSE(channel_->PressDTMF(1, true));
+  codecs.push_back(kTelephoneEventCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(channel_->PressDTMF(1, true));
+}
+
+// Test that we can play a ringback tone properly in a single-stream call.
+TEST_F(WebRtcVoiceEngineTest, PlayRingback) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+  // Check we fail if no ringback tone specified.
+  EXPECT_FALSE(channel_->PlayRingbackTone(0, true, true));
+  EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+  // Check we can set and play a ringback tone.
+  EXPECT_TRUE(channel_->SetRingbackTone(kRingbackTone, strlen(kRingbackTone)));
+  EXPECT_TRUE(channel_->PlayRingbackTone(0, true, true));
+  EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+  // Check we can stop the tone manually.
+  EXPECT_TRUE(channel_->PlayRingbackTone(0, false, false));
+  EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+  // Check we stop the tone if a packet arrives.
+  EXPECT_TRUE(channel_->PlayRingbackTone(0, true, true));
+  EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+  EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+}
+
+// Test that we can play a ringback tone properly in a multi-stream call.
+TEST_F(WebRtcVoiceEngineTest, PlayRingbackWithMultipleStreams) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->AddStream(1));
+  EXPECT_TRUE(channel_->AddStream(2));
+  int channel_num = voe_.GetLastChannel();
+  EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+  // Check we fail if no ringback tone specified.
+  EXPECT_FALSE(channel_->PlayRingbackTone(2, true, true));
+  EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+  // Check we can set and play a ringback tone on the correct ssrc.
+  EXPECT_TRUE(channel_->SetRingbackTone(kRingbackTone, strlen(kRingbackTone)));
+  EXPECT_FALSE(channel_->PlayRingbackTone(77, true, true));
+  EXPECT_TRUE(channel_->PlayRingbackTone(2, true, true));
+  EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+  // Check we can stop the tone manually.
+  EXPECT_TRUE(channel_->PlayRingbackTone(2, false, false));
+  EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+  // Check we stop the tone if a packet arrives, but only with the right SSRC.
+  EXPECT_TRUE(channel_->PlayRingbackTone(2, true, true));
+  EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+  // Send a packet with SSRC 1; the tone should not stop.
+  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+  EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+  // Send a packet with SSRC 2; the tone should stop.
+  char packet[sizeof(kPcmuFrame)];
+  memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));
+  talk_base::SetBE32(packet + 8, 2);
+  DeliverPacket(packet, sizeof(packet));
+  EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+}
+
+// Tests creating soundclips, and make sure they come from the right engine.
+TEST_F(WebRtcVoiceEngineTest, CreateSoundclip) {
+  EXPECT_TRUE(engine_.Init());
+  soundclip_ = engine_.CreateSoundclip();
+  ASSERT_TRUE(soundclip_ != NULL);
+  EXPECT_EQ(0, voe_.GetNumChannels());
+  EXPECT_EQ(1, voe_sc_.GetNumChannels());
+  int channel_num = voe_sc_.GetLastChannel();
+  EXPECT_TRUE(voe_sc_.GetPlayout(channel_num));
+  delete soundclip_;
+  soundclip_ = NULL;
+  EXPECT_EQ(0, voe_sc_.GetNumChannels());
+}
+
+// Tests playing out a fake sound.
+TEST_F(WebRtcVoiceEngineTest, PlaySoundclip) {
+  static const char kZeroes[16000] = {};
+  EXPECT_TRUE(engine_.Init());
+  soundclip_ = engine_.CreateSoundclip();
+  ASSERT_TRUE(soundclip_ != NULL);
+  EXPECT_TRUE(soundclip_->PlaySound(kZeroes, sizeof(kZeroes), 0));
+}
+
+TEST_F(WebRtcVoiceEngineTest, MediaEngineCallbackOnError) {
+  talk_base::scoped_ptr<ChannelErrorListener> listener;
+  cricket::WebRtcVoiceMediaChannel* media_channel;
+  unsigned int ssrc = 0;
+
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+
+  media_channel = reinterpret_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
+  listener.reset(new ChannelErrorListener(channel_));
+
+  // Test on WebRtc VoE channel.
+  voe_.TriggerCallbackOnError(media_channel->voe_channel(),
+                               VE_SATURATION_WARNING);
+  EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_DEVICE_SATURATION,
+            listener->error());
+  EXPECT_NE(-1, voe_.GetLocalSSRC(voe_.GetLastChannel(), ssrc));
+  EXPECT_EQ(ssrc, listener->ssrc());
+
+  listener->Reset();
+  voe_.TriggerCallbackOnError(-1, VE_TYPING_NOISE_WARNING);
+  EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_TYPING_NOISE_DETECTED,
+            listener->error());
+  EXPECT_EQ(0U, listener->ssrc());
+
+  // Add another stream and test on that.
+  ++ssrc;
+  EXPECT_TRUE(channel_->AddStream(ssrc));
+  listener->Reset();
+  voe_.TriggerCallbackOnError(voe_.GetLastChannel(),
+                               VE_SATURATION_WARNING);
+  EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_DEVICE_SATURATION,
+            listener->error());
+  EXPECT_EQ(ssrc, listener->ssrc());
+
+  // Testing a non-existing channel.
+  listener->Reset();
+  voe_.TriggerCallbackOnError(voe_.GetLastChannel() + 2,
+                               VE_SATURATION_WARNING);
+  EXPECT_EQ(0, listener->error());
+}
+
+TEST_F(WebRtcVoiceEngineTest, TestSetPlayoutError) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+  EXPECT_TRUE(channel_->AddStream(2));
+  EXPECT_TRUE(channel_->AddStream(3));
+  EXPECT_TRUE(channel_->SetPlayout(true));
+  voe_.set_playout_fail_channel(voe_.GetLastChannel() - 1);
+  EXPECT_TRUE(channel_->SetPlayout(false));
+  EXPECT_FALSE(channel_->SetPlayout(true));
+}
+
+// Tests for the actual WebRtc VoE library.
+
+// Tests that the library initializes and shuts down properly.
+TEST(WebRtcVoiceEngineLibTest, StartupShutdown) {
+  cricket::WebRtcVoiceEngine engine;
+  EXPECT_TRUE(engine.Init());
+  cricket::VoiceMediaChannel* channel = engine.CreateChannel();
+  EXPECT_TRUE(channel != NULL);
+  delete channel;
+  engine.Terminate();
+
+  // Reinit to catch regression where VoiceEngineObserver reference is lost
+  EXPECT_TRUE(engine.Init());
+  engine.Terminate();
+}
+
+// Tests that the logging from the library is cleartext.
+// TODO: This test case is disabled due to a known bug in VoE4.0
+// which sends out truncated log message. Will be fixed in next VoE release.
+TEST(WebRtcVoiceEngineLibTest, DISABLED_HasUnencryptedLogging) {
+  cricket::WebRtcVoiceEngine engine;
+  talk_base::scoped_ptr<talk_base::MemoryStream> stream(
+      new talk_base::MemoryStream);
+  size_t size = 0;
+  bool cleartext = true;
+  talk_base::LogMessage::AddLogToStream(stream.get(), talk_base::LS_VERBOSE);
+  engine.SetLogging(talk_base::LS_VERBOSE, "");
+  EXPECT_TRUE(engine.Init());
+  EXPECT_TRUE(stream->GetSize(&size));
+  EXPECT_GT(size, 0U);
+  engine.Terminate();
+  talk_base::LogMessage::RemoveLogToStream(stream.get());
+  const char* buf = stream->GetBuffer();
+  for (size_t i = 0; i < size && cleartext; ++i) {
+    int ch = static_cast<int>(buf[i]);
+    ASSERT_GE(ch, 0) << "Out of bounds character in WebRtc VoE log: "
+                     << std::hex << ch;
+    cleartext = (isprint(ch) || isspace(ch));
+  }
+  EXPECT_TRUE(cleartext);
+}
+
+// Tests we do not see any references to a monitor thread being spun up
+// when initializing the engine.
+TEST(WebRtcVoiceEngineLibTest, HasNoMonitorThread) {
+  cricket::WebRtcVoiceEngine engine;
+  talk_base::scoped_ptr<talk_base::MemoryStream> stream(
+      new talk_base::MemoryStream);
+  talk_base::LogMessage::AddLogToStream(stream.get(), talk_base::LS_VERBOSE);
+  engine.SetLogging(talk_base::LS_VERBOSE, "");
+  EXPECT_TRUE(engine.Init());
+  engine.Terminate();
+  talk_base::LogMessage::RemoveLogToStream(stream.get());
+
+  size_t size = 0;
+  EXPECT_TRUE(stream->GetSize(&size));
+  EXPECT_GT(size, 0U);
+  const std::string logs(stream->GetBuffer());
+  EXPECT_NE(std::string::npos, logs.find("ProcessThread"));
+}
+
+// Tests that the library is configured with the codecs we want.
+TEST(WebRtcVoiceEngineLibTest, HasCorrectCodecs) {
+  cricket::WebRtcVoiceEngine engine;
+  // Check codecs by name.
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "ISAC", 16000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "ISAC", 32000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "iLBC", 8000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "PCMU", 8000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "PCMA", 8000, 0, 1, 0)));
+// TODO: Add speex back, once webrtc supports it.
+//  EXPECT_TRUE(engine.FindCodec(
+//      cricket::AudioCodec(96, "speex", 16000, 0, 1, 0)));
+//  EXPECT_TRUE(engine.FindCodec(
+//      cricket::AudioCodec(96, "speex", 8000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "G722", 16000, 0, 1, 0)));
+//  EXPECT_TRUE(engine.FindCodec(
+//      cricket::AudioCodec(96, "GSM", 8000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "red", 8000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "CN", 32000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "CN", 16000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "CN", 8000, 0, 1, 0)));
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(96, "telephone-event", 8000, 0, 1, 0)));
+  // Check codecs with an id by id.
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(0, "", 8000, 0, 1, 0)));   // PCMU
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(8, "", 8000, 0, 1, 0)));   // PCMA
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(9, "", 16000, 0, 1, 0)));   // G722
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(13, "", 8000, 0, 1, 0)));  // CN
+  // Check sample/bitrate matching.
+  EXPECT_TRUE(engine.FindCodec(
+      cricket::AudioCodec(0, "PCMU", 8000, 64000, 1, 0)));
+  // Check that bad codecs fail.
+  EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(99, "ABCD", 0, 0, 1, 0)));
+  EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(88, "", 0, 0, 1, 0)));
+  EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 0, 0, 2, 0)));
+  EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 5000, 0, 1, 0)));
+  EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 0, 5000, 1, 0)));
+  // Check that there aren't any extra codecs lying around.
+  EXPECT_EQ(11U, engine.codecs().size());
+  // Verify the payload id of common audio codecs, including CN, ISAC, and G722.
+  // TODO: WebRtc team may change the payload id.
+  for (std::vector<cricket::AudioCodec>::const_iterator it =
+      engine.codecs().begin(); it != engine.codecs().end(); ++it) {
+    if (it->name == "CN" && it->clockrate == 16000) {
+      EXPECT_EQ(98, it->id);
+    } else if (it->name == "CN" && it->clockrate == 32000) {
+      EXPECT_EQ(99, it->id);
+    } else if (it->name == "ISAC" && it->clockrate == 16000) {
+      EXPECT_EQ(103, it->id);
+    } else if (it->name == "ISAC" && it->clockrate == 32000) {
+      EXPECT_EQ(104, it->id);
+    } else if (it->name == "G722" && it->clockrate == 16000) {
+      EXPECT_EQ(9, it->id);
+    }
+  }
+
+  engine.Terminate();
+}
+
+// Tests that the list of supported codecs is created properly and ordered
+// correctly
+TEST_F(WebRtcVoiceEngineTest, CodecPreference) {
+  cricket::WebRtcVoiceEngine engine;
+  const std::vector<cricket::AudioCodec>& codecs = engine.codecs();
+  ASSERT_FALSE(codecs.empty());
+  EXPECT_EQ("ISAC", codecs[0].name);
+  EXPECT_EQ(16000, codecs[0].clockrate);
+  EXPECT_EQ(32000, codecs[0].bitrate);
+  int pref = codecs[0].preference;
+  for (size_t i = 1; i < codecs.size(); ++i) {
+    EXPECT_GT(pref, codecs[i].preference);
+    pref = codecs[i].preference;
+  }
+}
+
+TEST(WebRtcVoiceEngineLibTest, Has32Channels) {
+  cricket::WebRtcVoiceEngine engine;
+  EXPECT_TRUE(engine.Init());
+
+  cricket::VoiceMediaChannel* channels[32];
+  int num_channels = 0;
+
+  while (num_channels < ARRAY_SIZE(channels)) {
+    cricket::VoiceMediaChannel* channel = engine.CreateChannel();
+    if (!channel)
+      break;
+
+    channels[num_channels++] = channel;
+  }
+
+  int expected = ARRAY_SIZE(channels);
+  EXPECT_EQ(expected, num_channels);
+
+  while (num_channels > 0) {
+    delete channels[--num_channels];
+  }
+
+  engine.Terminate();
+}
+
+#ifdef WIN32
+// Test our workarounds for WebRtc VoE's munging of the COM init count
+TEST(WebRtcVoiceEngineLibTest, CoInitialize) {
+  cricket::WebRtcVoiceEngine* engine = new cricket::WebRtcVoiceEngine();
+
+  // Initial refcount should be 0.
+  EXPECT_EQ(S_OK, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+
+  // Engine should start even with COM already inited.
+  EXPECT_TRUE(engine->Init());
+  engine->Terminate();
+  EXPECT_TRUE(engine->Init());
+  engine->Terminate();
+
+  // Refcount after terminate should be 1 (in reality 3); test if it is nonzero.
+  EXPECT_EQ(S_FALSE, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+  // Decrement refcount to (hopefully) 0.
+  CoUninitialize();
+  CoUninitialize();
+  delete engine;
+
+  // Ensure refcount is 0.
+  EXPECT_EQ(S_OK, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+  CoUninitialize();
+}
+#endif
diff --git a/talk/sound/nullsoundsystem.cc b/talk/sound/nullsoundsystem.cc
new file mode 100644
index 0000000..2920008
--- /dev/null
+++ b/talk/sound/nullsoundsystem.cc
@@ -0,0 +1,174 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/sound/nullsoundsystem.h"
+
+#include "talk/base/logging.h"
+#include "talk/sound/sounddevicelocator.h"
+#include "talk/sound/soundinputstreaminterface.h"
+#include "talk/sound/soundoutputstreaminterface.h"
+
+namespace talk_base {
+
+class Thread;
+
+}
+
+namespace cricket {
+
+// Name used for the single device and the sound system itself.
+static const char kNullName[] = "null";
+
+class NullSoundDeviceLocator : public SoundDeviceLocator {
+ public:
+  NullSoundDeviceLocator() : SoundDeviceLocator(kNullName, kNullName) {}
+
+  virtual SoundDeviceLocator *Copy() const {
+    return new NullSoundDeviceLocator();
+  }
+};
+
+class NullSoundInputStream : public SoundInputStreamInterface {
+ public:
+  virtual bool StartReading() {
+    return true;
+  }
+
+  virtual bool StopReading() {
+    return true;
+  }
+
+  virtual bool GetVolume(int *volume) {
+    *volume = SoundSystemInterface::kMinVolume;
+    return true;
+  }
+
+  virtual bool SetVolume(int volume) {
+    return false;
+  }
+
+  virtual bool Close() {
+    return true;
+  }
+
+  virtual int LatencyUsecs() {
+    return 0;
+  }
+};
+
+class NullSoundOutputStream : public SoundOutputStreamInterface {
+ public:
+  virtual bool EnableBufferMonitoring() {
+    return true;
+  }
+
+  virtual bool DisableBufferMonitoring() {
+    return true;
+  }
+
+  virtual bool WriteSamples(const void *sample_data,
+                            size_t size) {
+    LOG(LS_VERBOSE) << "Got " << size << " bytes of playback samples";
+    return true;
+  }
+
+  virtual bool GetVolume(int *volume) {
+    *volume = SoundSystemInterface::kMinVolume;
+    return true;
+  }
+
+  virtual bool SetVolume(int volume) {
+    return false;
+  }
+
+  virtual bool Close() {
+    return true;
+  }
+
+  virtual int LatencyUsecs() {
+    return 0;
+  }
+};
+
+NullSoundSystem::~NullSoundSystem() {
+}
+
+bool NullSoundSystem::Init() {
+  return true;
+}
+
+void NullSoundSystem::Terminate() {
+  // Nothing to do.
+}
+
+bool NullSoundSystem::EnumeratePlaybackDevices(
+      SoundSystemInterface::SoundDeviceLocatorList *devices) {
+  ClearSoundDeviceLocatorList(devices);
+  SoundDeviceLocator *device;
+  GetDefaultPlaybackDevice(&device);
+  devices->push_back(device);
+  return true;
+}
+
+bool NullSoundSystem::EnumerateCaptureDevices(
+      SoundSystemInterface::SoundDeviceLocatorList *devices) {
+  ClearSoundDeviceLocatorList(devices);
+  SoundDeviceLocator *device;
+  GetDefaultCaptureDevice(&device);
+  devices->push_back(device);
+  return true;
+}
+
+bool NullSoundSystem::GetDefaultPlaybackDevice(
+    SoundDeviceLocator **device) {
+  *device = new NullSoundDeviceLocator();
+  return true;
+}
+
+bool NullSoundSystem::GetDefaultCaptureDevice(
+    SoundDeviceLocator **device) {
+  *device = new NullSoundDeviceLocator();
+  return true;
+}
+
+SoundOutputStreamInterface *NullSoundSystem::OpenPlaybackDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params) {
+  return new NullSoundOutputStream();
+}
+
+SoundInputStreamInterface *NullSoundSystem::OpenCaptureDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params) {
+  return new NullSoundInputStream();
+}
+
+const char *NullSoundSystem::GetName() const {
+  return kNullName;
+}
+
+}  // namespace cricket
diff --git a/talk/sound/nullsoundsystem.h b/talk/sound/nullsoundsystem.h
new file mode 100644
index 0000000..3edb4f9
--- /dev/null
+++ b/talk/sound/nullsoundsystem.h
@@ -0,0 +1,70 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SOUND_NULLSOUNDSYSTEM_H_
+#define TALK_SOUND_NULLSOUNDSYSTEM_H_
+
+#include "talk/sound/soundsysteminterface.h"
+
+namespace cricket {
+
+class SoundDeviceLocator;
+class SoundInputStreamInterface;
+class SoundOutputStreamInterface;
+
+// A simple reference sound system that drops output samples and generates
+// no input samples.
+class NullSoundSystem : public SoundSystemInterface {
+ public:
+  static SoundSystemInterface *Create() {
+    return new NullSoundSystem();
+  }
+
+  virtual ~NullSoundSystem();
+
+  virtual bool Init();
+  virtual void Terminate();
+
+  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
+  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
+
+  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+  virtual SoundInputStreamInterface *OpenCaptureDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+
+  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
+  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
+
+  virtual const char *GetName() const;
+};
+
+}  // namespace cricket
+
+#endif  // TALK_SOUND_NULLSOUNDSYSTEM_H_
diff --git a/talk/sound/nullsoundsystemfactory.cc b/talk/sound/nullsoundsystemfactory.cc
new file mode 100644
index 0000000..089d51f
--- /dev/null
+++ b/talk/sound/nullsoundsystemfactory.cc
@@ -0,0 +1,49 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice, 
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products 
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/sound/nullsoundsystemfactory.h"
+
+#include "talk/sound/nullsoundsystem.h"
+
+namespace cricket {
+
+NullSoundSystemFactory::NullSoundSystemFactory() {
+}
+
+NullSoundSystemFactory::~NullSoundSystemFactory() {
+}
+
+bool NullSoundSystemFactory::SetupInstance() {
+  instance_.reset(new NullSoundSystem());
+  return true;
+}
+
+void NullSoundSystemFactory::CleanupInstance() {
+  instance_.reset();
+}
+
+}  // namespace cricket
diff --git a/talk/sound/nullsoundsystemfactory.h b/talk/sound/nullsoundsystemfactory.h
new file mode 100644
index 0000000..71ae980
--- /dev/null
+++ b/talk/sound/nullsoundsystemfactory.h
@@ -0,0 +1,50 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice, 
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products 
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SOUND_NULLSOUNDSYSTEMFACTORY_H_
+#define TALK_SOUND_NULLSOUNDSYSTEMFACTORY_H_
+
+#include "talk/sound/soundsystemfactory.h"
+
+namespace cricket {
+
+// A SoundSystemFactory that always returns a NullSoundSystem. Intended for
+// testing.
+class NullSoundSystemFactory : public SoundSystemFactory {
+ public:
+  NullSoundSystemFactory();
+  virtual ~NullSoundSystemFactory();
+
+ protected:
+  // Inherited from SoundSystemFactory.
+  virtual bool SetupInstance();
+  virtual void CleanupInstance();
+};
+
+}  // namespace cricket
+
+#endif  // TALK_SOUND_NULLSOUNDSYSTEMFACTORY_H_
diff --git a/talk/xmpp/constants.cc b/talk/xmpp/constants.cc
index a1aaa63..cf63e79 100644
--- a/talk/xmpp/constants.cc
+++ b/talk/xmpp/constants.cc
@@ -131,6 +131,13 @@
 const std::string STR_RESULT("result");
 const std::string STR_ERROR("error");
 
+const std::string STR_FORM("form");
+const std::string STR_SUBMIT("submit");
+const std::string STR_TEXT_SINGLE("text-single");
+const std::string STR_LIST_SINGLE("list-single");
+const std::string STR_LIST_MULTI("list-multi");
+const std::string STR_HIDDEN("hidden");
+const std::string STR_FORM_TYPE("FORM_TYPE");
 
 const std::string STR_FROM("from");
 const std::string STR_TO("to");
@@ -160,7 +167,11 @@
 
 const std::string STR_UNAVAILABLE("unavailable");
 
-const std::string STR_MUC_LOOKUP_DOMAIN("lookup.groupchat.google.com");
+const Jid JID_GOOGLE_MUC_LOOKUP("lookup.groupchat.google.com");
+const std::string STR_MUC_ROOMCONFIG_ROOMNAME("muc#roomconfig_roomname");
+const std::string STR_MUC_ROOMCONFIG_FEATURES("muc#roomconfig_features");
+const std::string STR_MUC_ROOM_FEATURE_ENTERPRISE("muc_enterprise");
+const std::string STR_MUC_ROOMCONFIG("http://jabber.org/protocol/muc#roomconfig");
 
 const QName QN_STREAM_STREAM(true, NS_STREAM, STR_STREAM);
 const QName QN_STREAM_FEATURES(true, NS_STREAM, "features");
@@ -379,6 +390,22 @@
 const QName QN_DISCO_ITEMS_QUERY(true, NS_DISCO_ITEMS, "query");
 const QName QN_DISCO_ITEM(true, NS_DISCO_ITEMS, "item");
 
+// JEP 0020
+const std::string NS_FEATURE("http://jabber.org/protocol/feature-neg");
+const QName QN_FEATURE_FEATURE(true, NS_FEATURE, "feature");
+
+// JEP 0004
+const std::string NS_XDATA("jabber:x:data");
+const QName QN_XDATA_X(true, NS_XDATA, "x");
+const QName QN_XDATA_INSTRUCTIONS(true, NS_XDATA, "instructions");
+const QName QN_XDATA_TITLE(true, NS_XDATA, "title");
+const QName QN_XDATA_FIELD(true, NS_XDATA, "field");
+const QName QN_XDATA_REPORTED(true, NS_XDATA, "reported");
+const QName QN_XDATA_ITEM(true, NS_XDATA, "item");
+const QName QN_XDATA_DESC(true, NS_XDATA, "desc");
+const QName QN_XDATA_REQUIRED(true, NS_XDATA, "required");
+const QName QN_XDATA_VALUE(true, NS_XDATA, "value");
+const QName QN_XDATA_OPTION(true, NS_XDATA, "option");
 
 // JEP 0045
 const std::string NS_MUC("http://jabber.org/protocol/muc");
@@ -404,7 +431,7 @@
 const QName QN_SEARCH_QUERY(true, NS_SEARCH, "query");
 const QName QN_SEARCH_ITEM(true, NS_SEARCH, "item");
 const QName QN_SEARCH_ROOM_NAME(true, NS_SEARCH, "room-name");
-const QName QN_SEARCH_ORGANIZERS_DOMAIN(true, NS_SEARCH, "organizers-domain");
+const QName QN_SEARCH_ROOM_DOMAIN(true, NS_SEARCH, "room-domain");
 const QName QN_SEARCH_ROOM_JID(true, NS_SEARCH, "room-jid");
 
 
diff --git a/talk/xmpp/constants.h b/talk/xmpp/constants.h
index ecc8e00..ba9c9e1 100644
--- a/talk/xmpp/constants.h
+++ b/talk/xmpp/constants.h
@@ -85,6 +85,13 @@
 extern const std::string STR_RESULT;
 extern const std::string STR_ERROR;
 
+extern const std::string STR_FORM;
+extern const std::string STR_SUBMIT;
+extern const std::string STR_TEXT_SINGLE;
+extern const std::string STR_LIST_SINGLE;
+extern const std::string STR_LIST_MULTI;
+extern const std::string STR_HIDDEN;
+extern const std::string STR_FORM_TYPE;
 
 extern const std::string STR_FROM;
 extern const std::string STR_TO;
@@ -114,7 +121,10 @@
 
 extern const std::string STR_UNAVAILABLE;
 
-extern const std::string STR_MUC_LOOKUP_DOMAIN;
+extern const Jid JID_GOOGLE_MUC_LOOKUP;
+extern const std::string STR_MUC_ROOMCONFIG_ROOMNAME;
+extern const std::string STR_MUC_ROOMCONFIG_FEATURES;
+extern const std::string STR_MUC_ROOM_FEATURE_ENTERPRISE;
 
 extern const QName QN_STREAM_STREAM;
 extern const QName QN_STREAM_FEATURES;
@@ -345,6 +355,22 @@
 extern const QName QN_DISCO_ITEMS_QUERY;
 extern const QName QN_DISCO_ITEM;
 
+// JEP 0020
+extern const std::string NS_FEATURE;
+extern const QName QN_FEATURE_FEATURE;
+
+// JEP 0004
+extern const std::string NS_XDATA;
+extern const QName QN_XDATA_X;
+extern const QName QN_XDATA_INSTRUCTIONS;
+extern const QName QN_XDATA_TITLE;
+extern const QName QN_XDATA_FIELD;
+extern const QName QN_XDATA_REPORTED;
+extern const QName QN_XDATA_ITEM;
+extern const QName QN_XDATA_DESC;
+extern const QName QN_XDATA_REQUIRED;
+extern const QName QN_XDATA_VALUE;
+extern const QName QN_XDATA_OPTION;
 
 // JEP 0045
 extern const std::string NS_MUC;
@@ -371,7 +397,7 @@
 extern const QName QN_SEARCH_ITEM;
 extern const QName QN_SEARCH_ROOM_NAME;
 extern const QName QN_SEARCH_ROOM_JID;
-extern const QName QN_SEARCH_ORGANIZERS_DOMAIN;
+extern const QName QN_SEARCH_ROOM_DOMAIN;
 
 
 // JEP 0115
diff --git a/talk/xmpp/iqtask.cc b/talk/xmpp/iqtask.cc
index f319990..f54f630 100644
--- a/talk/xmpp/iqtask.cc
+++ b/talk/xmpp/iqtask.cc
@@ -73,13 +73,13 @@
   if (success) {
     HandleResult(stanza);
   } else {
-    SignalError(stanza->FirstNamed(QN_ERROR));
+    SignalError(this, stanza->FirstNamed(QN_ERROR));
   }
   return STATE_DONE;
 }
 
 int IqTask::OnTimeout() {
-  SignalError(NULL);
+  SignalError(this, NULL);
   return XmppTask::OnTimeout();
 }
 
diff --git a/talk/xmpp/iqtask.h b/talk/xmpp/iqtask.h
index 7d9d621..589616b 100644
--- a/talk/xmpp/iqtask.h
+++ b/talk/xmpp/iqtask.h
@@ -42,7 +42,8 @@
          buzz::XmlElement* el);
   virtual ~IqTask() {}
 
-  sigslot::signal1<const XmlElement*> SignalError;
+  sigslot::signal2<IqTask*,
+                   const XmlElement*> SignalError;
 
  protected:
   virtual void HandleResult(const buzz::XmlElement* element) = 0;
diff --git a/talk/xmpp/mucroomconfigtask.cc b/talk/xmpp/mucroomconfigtask.cc
new file mode 100644
index 0000000..272bd44
--- /dev/null
+++ b/talk/xmpp/mucroomconfigtask.cc
@@ -0,0 +1,91 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include <vector>
+
+#include "talk/xmpp/mucroomconfigtask.h"
+
+#include "talk/base/scoped_ptr.h"
+#include "talk/xmpp/constants.h"
+
+namespace buzz {
+
+MucRoomConfigTask::MucRoomConfigTask(
+    XmppTaskParentInterface* parent,
+    const Jid& room_jid,
+    const std::string& room_name,
+    const std::vector<std::string>& room_features)
+    : IqTask(parent, STR_SET, room_jid,
+             MakeRequest(room_name, room_features)),
+      room_jid_(room_jid) {
+}
+
+XmlElement* MucRoomConfigTask::MakeRequest(
+    const std::string& room_name,
+    const std::vector<std::string>& room_features) {
+  buzz::XmlElement* owner_query = new
+      buzz::XmlElement(buzz::QN_MUC_OWNER_QUERY, true);
+
+  buzz::XmlElement* x_form = new buzz::XmlElement(buzz::QN_XDATA_X, true);
+  x_form->SetAttr(buzz::QN_TYPE, buzz::STR_FORM);
+
+  buzz::XmlElement* roomname_field =
+      new buzz::XmlElement(buzz::QN_XDATA_FIELD, false);
+  roomname_field->SetAttr(buzz::QN_VAR, buzz::STR_MUC_ROOMCONFIG_ROOMNAME);
+  roomname_field->SetAttr(buzz::QN_TYPE, buzz::STR_TEXT_SINGLE);
+
+  buzz::XmlElement* roomname_value =
+      new buzz::XmlElement(buzz::QN_XDATA_VALUE, false);
+  roomname_value->SetBodyText(room_name);
+
+  roomname_field->AddElement(roomname_value);
+  x_form->AddElement(roomname_field);
+
+  buzz::XmlElement* features_field =
+      new buzz::XmlElement(buzz::QN_XDATA_FIELD, false);
+  features_field->SetAttr(buzz::QN_VAR, buzz::STR_MUC_ROOMCONFIG_FEATURES);
+  features_field->SetAttr(buzz::QN_TYPE, buzz::STR_LIST_MULTI);
+
+  for (std::vector<std::string>::const_iterator feature = room_features.begin();
+       feature != room_features.end(); ++feature) {
+    buzz::XmlElement* features_value =
+        new buzz::XmlElement(buzz::QN_XDATA_VALUE, false);
+    features_value->SetBodyText(*feature);
+    features_field->AddElement(features_value);
+  }
+
+  x_form->AddElement(features_field);
+  owner_query->AddElement(x_form);
+  return owner_query;
+}
+
+void MucRoomConfigTask::HandleResult(const XmlElement* element) {
+  SignalResult(this);
+}
+
+}  // namespace buzz
diff --git a/talk/xmpp/mucroomconfigtask.h b/talk/xmpp/mucroomconfigtask.h
new file mode 100644
index 0000000..ba0dbaa
--- /dev/null
+++ b/talk/xmpp/mucroomconfigtask.h
@@ -0,0 +1,64 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_XMPP_MUCROOMCONFIGTASK_H_
+#define TALK_XMPP_MUCROOMCONFIGTASK_H_
+
+#include <string>
+#include "talk/xmpp/iqtask.h"
+
+namespace buzz {
+
+// This task configures the MUC room for document sharing and other
+// enterprise-specific goodies.
+class MucRoomConfigTask : public IqTask {
+ public:
+  MucRoomConfigTask(XmppTaskParentInterface* parent,
+                    const Jid& room_jid,
+                    const std::string& room_name,
+                    const std::vector<std::string>& room_features);
+
+  // Room configuration does not return any reasonable error
+  // values. The first config request configures the room; subsequent
+  // ones are simply ignored by the server, which returns an empty
+  // response.
+  sigslot::signal1<MucRoomConfigTask*> SignalResult;
+
+  const Jid& room_jid() const { return room_jid_; }
+
+ protected:
+  virtual void HandleResult(const XmlElement* stanza);
+
+ private:
+  static XmlElement* MakeRequest(const std::string& room_name,
+                                 const std::vector<std::string>& room_features);
+  Jid room_jid_;
+};
+
+}  // namespace buzz
+
+#endif  // TALK_XMPP_MUCROOMCONFIGTASK_H_
diff --git a/talk/xmpp/mucroomlookuptask.cc b/talk/xmpp/mucroomlookuptask.cc
index e6a204a..278dc38 100644
--- a/talk/xmpp/mucroomlookuptask.cc
+++ b/talk/xmpp/mucroomlookuptask.cc
@@ -35,28 +35,30 @@
 namespace buzz {
 
 MucRoomLookupTask::MucRoomLookupTask(XmppTaskParentInterface* parent,
+                                     const Jid& lookup_server_jid,
                                      const std::string& room_name,
-                                     const std::string& organizer_domain)
-    : IqTask(parent, STR_SET, Jid(STR_MUC_LOOKUP_DOMAIN),
-             MakeRoomQuery(room_name, organizer_domain)) {
+                                     const std::string& room_domain)
+    : IqTask(parent, STR_SET, lookup_server_jid,
+             MakeNameQuery(room_name, room_domain)) {
 }
 
 MucRoomLookupTask::MucRoomLookupTask(XmppTaskParentInterface* parent,
+                                     const Jid& lookup_server_jid,
                                      const Jid& room_jid)
-    : IqTask(parent, STR_SET, Jid(STR_MUC_LOOKUP_DOMAIN),
+    : IqTask(parent, STR_SET, lookup_server_jid,
              MakeJidQuery(room_jid)) {
 }
 
-XmlElement* MucRoomLookupTask::MakeRoomQuery(const std::string& room_name,
-    const std::string& org_domain) {
-  XmlElement* room_elem = new XmlElement(QN_SEARCH_ROOM_NAME, false);
-  room_elem->SetBodyText(room_name);
+XmlElement* MucRoomLookupTask::MakeNameQuery(
+    const std::string& room_name, const std::string& room_domain) {
+  XmlElement* name_elem = new XmlElement(QN_SEARCH_ROOM_NAME, false);
+  name_elem->SetBodyText(room_name);
 
-  XmlElement* domain_elem = new XmlElement(QN_SEARCH_ORGANIZERS_DOMAIN, false);
-  domain_elem->SetBodyText(org_domain);
+  XmlElement* domain_elem = new XmlElement(QN_SEARCH_ROOM_DOMAIN, false);
+  domain_elem->SetBodyText(room_domain);
 
   XmlElement* query = new XmlElement(QN_SEARCH_QUERY, true);
-  query->AddElement(room_elem);
+  query->AddElement(name_elem);
   query->AddElement(domain_elem);
   return query;
 }
@@ -72,37 +74,37 @@
 
 void MucRoomLookupTask::HandleResult(const XmlElement* stanza) {
   const XmlElement* query_elem = stanza->FirstNamed(QN_SEARCH_QUERY);
-  if (query_elem != NULL) {
-    const XmlElement* item_elem =
-        query_elem->FirstNamed(QN_SEARCH_ITEM);
-    if (item_elem != NULL && item_elem->HasAttr(QN_JID)) {
-      MucRoomInfo room_info;
-      if (GetRoomInfoFromResponse(item_elem, &room_info)) {
-        SignalResult(room_info);
-        return;
-      }
-    }
+  if (query_elem == NULL) {
+    SignalError(this, NULL);
+    return;
   }
 
-  SignalError(NULL);
-}
+  const XmlElement* item_elem = query_elem->FirstNamed(QN_SEARCH_ITEM);
+  if (item_elem == NULL) {
+    SignalError(this, NULL);
+    return;
+  }
 
-bool MucRoomLookupTask::GetRoomInfoFromResponse(
-    const XmlElement* stanza, MucRoomInfo* info) {
+  MucRoomInfo room;
+  room.jid = Jid(item_elem->Attr(buzz::QN_JID));
+  if (!room.jid.IsValid()) {
+    SignalError(this, NULL);
+    return;
+  }
 
-  info->room_jid = Jid(stanza->Attr(buzz::QN_JID));
-  if (!info->room_jid.IsValid()) return false;
+  const XmlElement* room_name_elem =
+      item_elem->FirstNamed(QN_SEARCH_ROOM_NAME);
+  if (room_name_elem != NULL) {
+    room.name = room_name_elem->BodyText();
+  }
 
-  const XmlElement* room_name_elem = stanza->FirstNamed(QN_SEARCH_ROOM_NAME);
-  const XmlElement* org_domain_elem =
-      stanza->FirstNamed(QN_SEARCH_ORGANIZERS_DOMAIN);
+  const XmlElement* room_domain_elem =
+      item_elem->FirstNamed(QN_SEARCH_ROOM_DOMAIN);
+  if (room_domain_elem != NULL) {
+    room.domain = room_domain_elem->BodyText();
+  }
 
-  if (room_name_elem != NULL)
-    info->room_name = room_name_elem->BodyText();
-  if (org_domain_elem != NULL)
-    info->organizer_domain = org_domain_elem->BodyText();
-
-  return true;
+  SignalResult(this, room);
 }
 
 }  // namespace buzz
diff --git a/talk/xmpp/mucroomlookuptask.h b/talk/xmpp/mucroomlookuptask.h
index e8e6c76..ec5873c 100644
--- a/talk/xmpp/mucroomlookuptask.h
+++ b/talk/xmpp/mucroomlookuptask.h
@@ -34,28 +34,35 @@
 namespace buzz {
 
 struct MucRoomInfo {
-  Jid room_jid;
-  std::string room_name;
-  std::string organizer_domain;
+  Jid jid;
+  std::string name;
+  std::string domain;
+
+  std::string full_name() const {
+    return name + "@" + domain;
+  }
 };
 
 class MucRoomLookupTask : public IqTask {
  public:
   MucRoomLookupTask(XmppTaskParentInterface* parent,
+                    const Jid& lookup_jid,
                     const std::string& room_name,
-                    const std::string& organizer_domain);
+                    const std::string& room_domain);
   MucRoomLookupTask(XmppTaskParentInterface* parent,
+                    const Jid& lookup_jid,
                     const Jid& room_jid);
 
-  sigslot::signal1<const MucRoomInfo&> SignalResult;
+  sigslot::signal2<MucRoomLookupTask*,
+                   const MucRoomInfo&> SignalResult;
+
+ protected:
+  virtual void HandleResult(const XmlElement* element);
 
  private:
-  static XmlElement* MakeRoomQuery(const std::string& room_name,
-                                   const std::string& org_domain);
+  static XmlElement* MakeNameQuery(const std::string& room_name,
+                                   const std::string& room_domain);
   static XmlElement* MakeJidQuery(const Jid& room_jid);
-  virtual void HandleResult(const XmlElement* element);
-  static bool GetRoomInfoFromResponse(const XmlElement* stanza,
-                                      MucRoomInfo* info);
 };
 
 }  // namespace buzz