<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE rfc SYSTEM "rfc2629-xhtml.ent">

<rfc xmlns:xi="http://www.w3.org/2001/XInclude" submissionType="IETF"
     category="std" consensus="true" number="8826"
     docName="draft-ietf-rtcweb-security-12"
     ipr="pre5378Trust200902"> ipr="pre5378Trust200902"
     obsoletes="" updates="" xml:lang="en" tocInclude="true" tocDepth="4"
     symRefs="true" sortRefs="true" version="3">
  <!-- xml2rfc v2v3 conversion 2.34.0 -->
  <front>
    <title abbrev="WebRTC Security">Security Considerations for WebRTC</title>
    <seriesInfo name="RFC" value="8826"/>
    <author fullname="Eric Rescorla" initials="E.K." initials="E." surname="Rescorla">
      <organization>RTFM, Inc.</organization>
      <address>
        <postal>
          <street>2064 Edgewood Drive</street>
          <city>Palo Alto</city>
          <region>CA</region>
          <code>94303</code>

          <country>United States of America</country>
        </postal>
        <phone>+1 650 678 2350</phone>
        <email>ekr@rtfm.com</email>
      </address>
    </author>
    <date year="2019" />

    <area>ART</area>

    <workgroup>RTC-Web</workgroup>


    <abstract>
      <t>
        WebRTC is a protocol suite for use with
	real-time applications that can be deployed in browsers -- "real-time
	communication on the Web".  This document defines the WebRTC threat
	model and analyzes the security threats of WebRTC in that model.
      </t>
    </abstract>
  </front>
  <middle>
    <section title="Introduction" anchor="sec.introduction"> anchor="sec.introduction" numbered="true" toc="default">
      <name>Introduction</name>
      <t>
        The Real-Time Communications on the Web (RTCWEB) Working Group has standardized
        protocols for real-time communications between Web browsers, generally
        called "WebRTC" <xref target="RFC8825" format="default"/>. The
        major use cases for WebRTC technology are real-time audio and/or video calls,
        Web conferencing, and direct data transfer. Unlike most conventional real-time systems
        (e.g., SIP-based <xref target="RFC3261" format="default"/> soft
        phones), WebRTC communications are directly controlled by some Web
        server. A simple case is shown below.
      </t>
      <figure title="A simple WebRTC system" anchor="fig.simple">
        <artwork><![CDATA[
        <name>A Simple WebRTC System</name>
        <artwork name="" type="" align="left" alt=""><![CDATA[
                          +----------------+
                          |                |
                          |   Web Server   |
                          |                |
                          +----------------+
                              ^        ^
                             /          \
                     HTTP   /            \   HTTP
                      or   /              \   or
               WebSockets /                \ WebSockets
                         v                  v
                      JS API              JS API
                +-----------+            +-----------+
                |           |    Media   |           |
                |  Browser  |<---------->|  Browser  |
                |           |            |           |
                +-----------+            +-----------+
                    Alice                     Bob ]]></artwork>
      </figure>
      <t>
        In the system shown in <xref target="fig.simple" format="default"/>, Alice and Bob both have
        WebRTC-enabled browsers and they visit some Web server that operates a
        calling service. Each of their browsers exposes standardized JavaScript
        (JS) calling APIs (implemented as browser built-ins)
        which are used by the Web server to set up a call between Alice and Bob.
        The Web server also serves as the signaling channel to transport
        control messages between the browsers.
        While this system is topologically similar to a conventional SIP-based
        system (with the Web server acting as the signaling service and browsers
        acting as softphones), control has moved to the central Web server;
        the browser simply provides API points that are used by the calling service.
        As with any Web application, the Web server can move logic between
        the server and JavaScript in the browser, but regardless of where the
        code is executing, it is ultimately under control of the server.
      </t>
      <t>
        It should be immediately apparent that this type of system poses new
        security challenges beyond those of a conventional Voice over IP (VoIP) system. In particular,
        it needs to contend with malicious calling services.
        For example, if the calling service
        can cause the browser to make a call at any time to any callee of its
        choice, then this facility can be used to bug a user's computer without
        their knowledge, simply by placing a call to some recording service.
        More subtly, if the exposed APIs allow the server to instruct the
        browser to send arbitrary content, then they can be used to bypass
        firewalls or mount denial-of-service (DoS) attacks. Any successful system
        will need to be resistant to this and other attacks.
      </t>
      <t>
        A companion document <xref target="RFC8827" format="default"/> describes a security
        architecture intended to address the issues raised in this document.
      </t>
    </section>
    <section anchor="sec-term" title="Terminology"> numbered="true" toc="default">
      <name>Terminology</name>
    <t>The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
      "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", "<bcp14>MUST</bcp14>", "<bcp14>MUST NOT</bcp14>",
    "<bcp14>REQUIRED</bcp14>", "<bcp14>SHALL</bcp14>",
    "<bcp14>SHALL NOT</bcp14>", "<bcp14>SHOULD</bcp14>",
    "<bcp14>SHOULD NOT</bcp14>",
    "<bcp14>RECOMMENDED</bcp14>", "<bcp14>NOT RECOMMENDED</bcp14>",
    "<bcp14>MAY</bcp14>", and
      "OPTIONAL" "<bcp14>OPTIONAL</bcp14>" in this document are
    to be interpreted as described in
      BCP 14 BCP&nbsp;14 <xref target="RFC2119"/>
    <xref target="RFC8174"/> when, and only when, they appear in all capitals,
    as shown here.</t>
    </section>
    <section title="The anchor="sec.web-security" numbered="true" toc="default">
      <name>The Browser Threat Model" anchor="sec.web-security"> Model</name>
      <t>
        The security requirements for WebRTC follow directly from the
        requirement that the browser's job is to protect the user.
        Huang et al. <xref target="huang-w2sp" format="default"/> summarize
        the core browser security guarantee as follows:
      </t>
      <ul empty="true">
        <li>Users can safely visit arbitrary web sites and execute scripts
        provided by those sites.</li>
      </ul>

      <t>
        It is important to realize that this includes sites hosting arbitrary malicious
        scripts. The motivation for this requirement is simple: it is trivial for attackers
        to divert users to sites of their choice. For instance, an attacker can purchase
        display advertisements which direct the user (either automatically or via user
        clicking) to their site, at which point the browser will execute the attacker's
        scripts. Thus, it is important that it be safe to view arbitrarily malicious pages.
        Of course, browsers inevitably have bugs which cause them to fall short of this
        goal, but any new WebRTC functionality must be designed with the intent to
        meet this standard. The remainder of this section provides more background
        on the existing Web security model.
      </t>
      <t>
        In this model, then, the browser acts as a Trusted Computing Base (TCB) both
        from the user's perspective and to some extent from the server's. While HTML
        and JavaScript (JS) provided by the server can cause the browser to execute a variety of
        actions, those scripts operate in a sandbox that isolates them both from
        the user's computer and from each other, as detailed below.
      </t>
      <t>
        Conventionally, we refer to either Web attackers, who are able to induce
        you to visit their sites but do not control the network, or network
        attackers, who are able to control your network. Network attackers correspond
        to the <xref target="RFC3552" format="default"/> "Internet Threat Model". Note that in some
        cases, a network attacker is also a Web attacker, since transport protocols
        that do not provide integrity protection allow the network to inject traffic
        as if they were any communications peer. TLS, and HTTPS in particular, protect
        against these attacks, but when analyzing HTTP connections, we must assume
        that traffic is going to the attacker.
      </t>
      <section title="Access anchor="sec.resources" numbered="true" toc="default">
        <name>Access to Local Resources" anchor="sec.resources"> Resources</name>
        <t>
          While the browser has access to local resources such as keying material,
          files, the camera, and the microphone, it strictly limits or forbids Web
          servers from accessing those same resources. For instance, while it is possible
          to produce an HTML form which will allow file upload, a script cannot do
          so without user consent and in fact cannot even suggest a specific file
          (e.g., /etc/passwd); the user must explicitly select the file and consent
          to its upload. (Note: In many cases, browsers are explicitly designed to
          avoid dialogs with the semantics of "click here to bypass security checks", as
          extensive research <xref target="cranor-wolf" format="default"/> shows that users are prone to
          consent under such circumstances.)
        </t>
        <t>
          Similarly, while Flash programs (SWFs) <xref target="SWF" format="default"/> can access the camera and microphone, they
          explicitly require that the user consent to that access. In addition,
          some resources simply cannot be accessed from the browser at all. For
          instance, there is no real way to run specific executables directly from a
          script (though the user can of course be induced to download executable
          files and run them).
        </t>
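        <t>
          As a non-normative illustration of this consent model, the sketch
          below (in TypeScript, using the standard getUserMedia() API) shows
          that a script can only request device access; whether the request
          succeeds depends entirely on the user's decision in browser chrome.
          The error handling shown is illustrative only.
        </t>
        <sourcecode type="typescript"><![CDATA[
// Sketch: a script may ask for the camera and microphone, but the
// browser will not grant access until the user explicitly consents.
async function requestDevices(): Promise<MediaStream | null> {
  try {
    // The browser displays its own permission prompt here; the page
    // cannot suppress it or pre-select a device for the user.
    return await navigator.mediaDevices.getUserMedia({
      audio: true,
      video: true,
    });
  } catch (e) {
    // NotAllowedError: the user (or policy) denied consent.
    console.log("Device access was not granted:", e);
    return null;
  }
}
]]></sourcecode>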
      </section>
      <section title="Same-Origin Policy" anchor="sec.same-origin"> anchor="sec.same-origin" numbered="true" toc="default">
        <name>Same-Origin Policy</name>
        <t>
          Many other resources are accessible but isolated. For instance,
          while scripts are allowed to make HTTP requests via the XMLHttpRequest() API (see <xref target="XmlHttpRequest" format="default"/>),
          those requests are not allowed to be made to any server, but rather solely
          to the same origin from whence the script came <xref target="RFC6454" format="default"/>
          (although Cross-Origin Resource Sharing (CORS) <xref target="CORS" format="default"/> and WebSockets
          <xref target="RFC6455" format="default"/> provide an escape hatch from this restriction,
          as described below).
          This Same-Origin Policy (SOP) prevents server A from mounting attacks
          on server B via the user's browser, which protects both the user
          (e.g., from misuse of their credentials) and the server B (e.g., from
          DoS attack).
        </t>
        <t>
          More generally, SOP forces scripts from each site to run in their own, isolated,
          sandboxes. While there are techniques to allow them to interact, those interactions
          generally must be mutually consensual (by each site) and are limited to certain
          channels. For instance, multiple pages / browser panes from the same origin
          can read each other's JS variables, but pages from different origins -- or
          even iframes from different origins on the same page -- cannot.
        </t>
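        <t>
          A minimal sketch of this isolation, from the script's point of view:
          a request back to the script's own origin succeeds, whereas reading
          the response of a cross-origin request fails unless the target opts
          in via CORS (<xref target="sec.cors-etc" format="default"/>). The
          origins and URLs below are placeholders.
        </t>
        <sourcecode type="typescript"><![CDATA[
// Running in a page loaded from https://a.example (hypothetical origin).
async function demonstrateSameOriginPolicy(): Promise<void> {
  // Same-origin request: the script may read the response.
  const own = await fetch("https://a.example/api/data");
  console.log("same-origin status:", own.status);

  try {
    // Cross-origin request: unless https://b.example consents via CORS
    // response headers, the browser withholds the response and the
    // promise rejects with a TypeError.
    const other = await fetch("https://b.example/api/data");
    console.log("cross-origin read allowed by CORS:", other.status);
  } catch (e) {
    console.log("cross-origin read blocked by the Same-Origin Policy:", e);
  }
}
]]></sourcecode>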

      </section>
      <section title="Bypassing anchor="sec.cors-etc" numbered="true" toc="default">
        <name>Bypassing SOP: CORS, WebSockets, and consent Consent to communicate" anchor="sec.cors-etc"> Communicate</name>
        <t>
          While SOP serves an important security function, it also makes it inconvenient to
          write certain classes of applications. In particular, mash-ups, in which a script
          from origin A uses resources from origin B, can only be achieved via a certain amount of hackery.
          The W3C CORS spec <xref target="CORS" format="default"/> is a response to this
          demand. In CORS, when a script from origin A executes what would otherwise be a forbidden
          cross-origin request, the browser instead contacts the target server to determine
          whether it is willing to allow cross-origin requests from A. If it is so willing,
          the browser then allows the request. This consent verification process is designed
          to safely allow cross-origin requests.
        </t>
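        <t>
          The consent check is performed by the target server, not the
          requesting script. A rough sketch of the server side of this
          verification, here using Node.js's built-in http module (the allowed
          origin and port are illustrative assumptions), might look like the
          following.
        </t>
        <sourcecode type="typescript"><![CDATA[
import * as http from "node:http";

// Hypothetical policy: only scripts from https://a.example may make
// cross-origin requests to this server.
const ALLOWED_ORIGIN = "https://a.example";

http.createServer((req, res) => {
  const origin = req.headers.origin;

  if (origin === ALLOWED_ORIGIN) {
    // Consent: echo the origin back so the browser releases the response
    // to the requesting script.
    res.setHeader("Access-Control-Allow-Origin", ALLOWED_ORIGIN);
    res.end("cross-origin access granted");
  } else {
    // No CORS headers: the browser will not let a cross-origin script
    // read this response.
    res.end("no cross-origin consent");
  }
}).listen(8080);
]]></sourcecode>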
        <t>
          While CORS is designed to allow cross-origin HTTP requests, WebSockets <xref target="RFC6455" format="default"/> allows
          cross-origin establishment of transparent channels. Once a WebSockets connection
          has been established from a script to a site, the script can exchange any traffic it
          likes without being required to frame it as a series of HTTP request/response
          transactions. As with CORS, a WebSockets transaction starts with a consent verification
          stage to avoid allowing scripts to simply send arbitrary data to another origin.
        </t>
        <t>
          While consent verification is conceptually simple -- just do a handshake before you
          start exchanging the real data -- experience has shown that designing a
          correct consent verification system is difficult. In particular, Huang et al. <xref target="huang-w2sp" format="default"/>
          have shown vulnerabilities in the existing Java and Flash consent verification
          techniques and in a simplified version of the WebSockets handshake. In particular,
          it is important to be wary of CROSS-PROTOCOL attacks in which the attacking script
          generates traffic which is acceptable to some non-Web protocol state machine.
          In order to resist this form of attack, WebSockets incorporates a masking technique
          intended to randomize the bits on the wire, thus making it more difficult to generate
          traffic which resembles a given protocol.
        </t>
      </section>
    </section>
    <section title="Security anchor="sec.rtc-web" numbered="true" toc="default">
      <name>Security for WebRTC Applications" anchor="sec.rtc-web"> Applications</name>
      <section title="Access anchor="sec.rtc-dev-access" numbered="true" toc="default">
        <name>Access to Local Devices" anchor="sec.rtc-dev-access"> Devices</name>
        <t>
          As discussed in <xref target="sec.introduction"/>, target="sec.introduction" format="default"/>, allowing arbitrary
          sites to initiate calls violates the core Web security guarantee;
          without some access restrictions on local devices, any malicious site
          could simply bug a user. At minimum, then, it <bcp14>MUST NOT</bcp14> be possible for
          arbitrary sites to initiate calls to arbitrary locations without user
          consent. This immediately raises the question, however, of what should
          be the scope of user consent.
        </t>
        <t>
          In order for the user to
          make an intelligent decision about whether to allow a call
          (and hence their camera and microphone input to be routed somewhere),
          they must understand either who is requesting access, where the media
          is going, or both. As detailed below, there are two basic conceptual
          models:
        </t>
        <ol spacing="normal" type="1">
          <li>You are sending your media to entity A because you want to
            talk to entity A (e.g., your mother).</li>
          <li>Entity A (e.g., a calling service) asks to access the user's devices with the assurance
            that it will transfer the media to entity B (e.g., your mother).</li>
        </ol>
        <t>
          In either case, identity is at the heart of any consent decision.
          Moreover, the identity of the party the browser is connecting to is all that the browser can meaningfully enforce;
          if you are calling A, A can simply forward the media to C. Similarly,
          if you authorize A to place a call to B, A can call C instead.
          In either case, all the browser is able to do is verify and check
          authorization for whoever is controlling where the media goes.
          The target of the media can of course advertise a security/privacy
          policy, but this is not something that the browser can
          enforce. Even so, there are a variety of different consent scenarios
          that motivate different technical consent mechanisms.
          We discuss these mechanisms in the sections below.
        </t>
        <t>
          It's important to understand that consent to access local devices
          is largely orthogonal to consent to transmit various kinds of
          data over the network (see <xref target="sec.rtc-comm-consent" format="default"/>).
          Consent for device access is largely a matter of protecting
          the user's privacy from malicious sites. By contrast,
          consent to send network traffic is about preventing the
          user's browser from being used to attack its local network.
          Thus, we need to ensure communications consent even if the
          site is not able to access the camera and microphone at
          all (hence WebSockets's consent mechanism); similarly,
          we need to be concerned with the site accessing the
          user's camera and microphone even if the data is to be
          sent back to the site via conventional HTTP-based network
          mechanisms such as HTTP POST.
        </t>
        <section title="Threats numbered="true" toc="default">
          <name>Threats from Screen Sharing"> Sharing</name>
          <t>
            In addition to camera and microphone access, there has been
            demand for screen and/or application sharing functionality.
            Unfortunately, the security implications of this
            functionality are much harder for users to intuitively
            analyze than for camera and microphone access.
            (See <eref brackets="angle" target="https://lists.w3.org/Archives/Public/public-webrtc/2013Mar/0024.html"/>
            for a full analysis.)
          </t>
          <t>
            The most obvious threats are simply those of "oversharing".
            That is, the user may believe they are sharing a window when
            in fact they are sharing an application, or may forget they
            are sharing their whole screen, icons, notifications, and all.
            This is already an issue with existing screen sharing technologies
            and is made somewhat worse if a partially trusted site is responsible for asking
            for the resource to be shared rather than having the user propose it.
          </t>
          <t>
            A less obvious threat involves the impact of screen sharing on the
            Web security model. A key part of the Same-Origin Policy is that
            HTML or JS from site A can reference content from site B and cause
            the browser to load it, but (unless explicitly permitted) cannot
            see the result. However, if a Web application from a site is
            screen sharing the browser, then this violates that invariant,
            with serious security consequences. For example, an attacker site
            might request screen sharing and then briefly open up a new
            window to the user's bank or webmail account, using screen sharing
            to read the resulting displayed content. A more sophisticated
            attack would be to open up a source view window to a site and use the
            screen sharing result to view anti-cross-site request forgery tokens.
          </t>
          <t>
            These threats suggest that screen/application sharing might need
            a higher level of user consent than access to the camera or
            microphone.
          </t>
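          <t>
            For reference, the sketch below shows the standard
            getDisplayMedia() API that browsers expose for screen capture.
            The page can only ask; the choice of surface (window, tab, or
            whole screen) is made by the user in browser chrome, which is one
            reason the consent decision is hard for users to reason about.
            The error handling is illustrative only.
          </t>
          <sourcecode type="typescript"><![CDATA[
// Sketch: request screen/application sharing.  The site cannot select
// which surface is captured; the user picks it in the browser's own UI.
async function startScreenShare(): Promise<MediaStream | null> {
  try {
    return await navigator.mediaDevices.getDisplayMedia({ video: true });
  } catch (e) {
    // The user declined or the browser blocked the request.
    console.log("Screen sharing not granted:", e);
    return null;
  }
}
]]></sourcecode>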
        </section>
        <section title="Calling numbered="true" toc="default">
          <name>Calling Scenarios and User Expectations"> Expectations</name>
          <t>
            While a large number of calling scenarios are possible, the
            scenarios discussed in this section illustrate many of
            the difficulties of identifying the relevant scope of consent.
          </t>
          <section title="Dedicated numbered="true" toc="default">
            <name>Dedicated Calling Services"> Services</name>
            <t>
              The first scenario we consider is a dedicated calling service. In this
              case, the user has a relationship with a calling site
              and repeatedly makes calls on it. It is likely
              that rather than having to give permission for each call,
              the user will want to give the calling service long-term
              access to the camera and microphone. This is a natural fit
              for a long-term consent mechanism (e.g., installing an
              app store "application" to indicate permission for the
              calling service).
              A variant of the dedicated calling service is a gaming site
              (e.g., a poker site) which hosts a dedicated calling service
              to allow players to call each other.
            </t>
            <t>
              With any kind of service where the user may use the same
              service to talk to many different people, there is a question
              about whether the user can know who they are talking to.
              If I grant permission to calling service A to make calls
              on my behalf, then I am implicitly granting it permission
              to bug my computer whenever it wants. This suggests another
              consent model in which a site is authorized to make calls
              but only to certain target entities (identified via
              media-plane cryptographic mechanisms as described in
              <xref target="sec.during-attack"/> target="sec.during-attack" format="default"/> and especially
              <xref target="sec.third-party-id"/>.) target="sec.third-party-id" format="default"/>). Note that the
              question of consent here is related to but
              distinct from the question of peer identity: I
              might be willing to allow a calling site to in general
              initiate calls on my behalf but still have some calls
              via that site where I can be sure that the site is not
              listening in.
            </t>
          </section>
          <section title="Calling numbered="true" toc="default">
            <name>Calling the Site You're On"> On</name>
            <t>
              Another simple scenario is calling the site you're actually visiting.
              The paradigmatic case here is the "click here to talk to a
              representative" windows that appear on many shopping sites.
              In this case, the user's expectation is that they are
              calling the site they're actually visiting. However, it is
              unlikely that they want to provide a general consent to such
              a site; just because I want some information on a car
              doesn't mean that I want the car manufacturer to be able
              to activate my microphone whenever they please. Thus,
              this suggests the need for a second consent mechanism
              where I only grant consent for the duration of a given
              call. As described in <xref target="sec.resources" format="default"/>,
              great care must be taken in the design of this interface
              to avoid the users just clicking through. Note also
              that the user interface chrome, which is the representation
              through which the user interacts with the user agent itself,
              must clearly display elements
              showing that the call is continuing in order to avoid attacks
              where the calling site just leaves it up indefinitely but
              shows a Web UI that implies otherwise.
            </t>
          </section>
        </section>
        <section title="Origin-Based Security"> numbered="true" toc="default">
          <name>Origin-Based Security</name>
          <t>
          Now that we have described the calling scenarios, we can start to reason about
          the security requirements.
          </t>
          <t>
          As discussed in <xref target="sec.same-origin"/>, target="sec.same-origin" format="default"/>, the basic unit of
          Web sandboxing is the origin, and so it is natural to scope consent
          to the origin. Specifically, a script from origin A <bcp14>MUST</bcp14> only be allowed
          to initiate communications (and hence to access the camera and microphone)
          if the user has specifically authorized access for that origin.
          It is of course technically possible to have coarser-scoped permissions,
          but because the Web model is scoped to the origin, this creates a difficult
          mismatch.
          </t>
          <t>
          Arguably, the origin is not fine-grained enough. Consider the situation where
          Alice visits a site and authorizes it to make a single call. If consent is
          expressed solely in terms of the origin, then upon any future visit to that
          site (including one induced via a mash-up or ad network), the site can
          bug Alice's computer, use the computer to place bogus calls, etc.
          While in principle Alice could grant and then
          revoke the privilege, in practice privileges accumulate; if we are concerned
          about this attack, something else is needed. There are a number of potential countermeasures to
          this sort of issue.
          </t>
        <t><list style="hanging">
          <t hangText="Individual Consent"></t><t>Ask
          <dl newline="true" spacing="normal">
            <dt>Individual Consent</dt>
            <dd>Ask the user for permission for each call.</t>
          <t></t>
          <t hangText="Callee-oriented Consent"></t><t>Only call.</dd>
            <dt>Callee-oriented Consent</dt>
            <dd>Only allow calls to a given user.</t>
          <t></t>
          <t hangText="Cryptographic Consent"></t><t>Only user.</dd>
            <dt>Cryptographic Consent</dt>
            <dd>Only allow calls to a given set of peer keying material or
          to a cryptographically established identity.</t>
        </list>
        </t> identity.</dd>
          </dl>
          <t>
          Unfortunately, none of these approaches is satisfactory for all cases.
          As discussed above, individual consent puts the user's approval
          in the UI flow for every call. Not only does this quickly become annoying
          but it can train the user to simply click "OK", at which point the consent becomes
          useless. Thus, while it may be necessary to have individual consent in some
          cases, this is not a suitable solution for (for instance) the calling
          service case. Where necessary, in-flow user interfaces must be carefully
          designed to avoid the risk of the user blindly clicking through.
          </t>
          <t>
          The other two options are designed to restrict calls to a given target.
          Callee-oriented consent provided by the calling site
          would not work well because a malicious site can claim that the
          user is calling any user of their choice. One fix for this is to tie calls to a
          cryptographically established identity. While not suitable for all cases,
          this approach may be useful for some. If we consider the case
          of advertising, it's not particularly convenient
          to require the advertiser to instantiate an iframe on the hosting site just
          to get permission; a more convenient approach is to cryptographically tie
          the advertiser's certificate to the communication directly. We're still
          tying permissions to the origin here, but to the media origin (and/or destination)
          rather than to the Web origin. <xref target="RFC8827" format="default"/>
          describes mechanisms which facilitate this sort of consent.
          </t>
          <t>
          Another case where media-level cryptographic identity makes sense is when a user
          really does not trust the calling site. For instance, I might be worried that
          the calling service will attempt to bug my computer, but I also want to be
          able to conveniently call my friends. If consent is tied to particular
          communications endpoints, then my risk is limited. Naturally, it
          is somewhat challenging to design UI primitives that express this sort
          of policy. The problem becomes even more challenging in multi-user
          calling cases.
          </t>
        </section>
        <section title="Security numbered="true" toc="default">
          <name>Security Properties of the Calling Page"> Page</name>
          <t>
          Origin-based security is intended to secure against Web attackers. However, we must
          also consider the case of network attackers. Consider the case where I have
          granted permission to a calling service by an origin that has the HTTP scheme,
          e.g., &lt;http://calling-service.example.com&gt;. If I ever use my computer on
          an unsecured network (e.g., a hotspot or if my own home wireless network
          is insecure), and browse any HTTP site, then an attacker can bug my computer. The attack proceeds
          like this:
          </t>
          <ol spacing="normal" type="1">
            <li>I connect to &lt;http://anything.example.org/&gt;. Note that this site is unaffiliated
            with the calling service.</li>
            <li>The attacker modifies my HTTP connection to inject an IFRAME (or a redirect)
            to &lt;http://calling-service.example.com&gt;.</li>
            <li>The attacker forges the response from &lt;http://calling-service.example.com/&gt; to
            inject JS to initiate a call to themselves.</li>
          </ol>

          <t>
          Note that this attack does not depend on the media being insecure. Because the
          call is to the attacker, it is also encrypted to them. Moreover, it need not
          be executed immediately; the attacker can "infect" the origin semi-permanently
          (e.g., with a Web worker or a popped-up window that is hidden under the main window)
          and thus be able to bug me long
          after I have left the infected network. This risk is created by allowing
          calls at all from a page fetched over HTTP.
          </t>
          <t>
          Even if calls are only possible from HTTPS <xref target="RFC2818" format="default"/> sites,
          if those sites include active content (e.g., JavaScript) from an untrusted
          site, that JavaScript is executed in the security context of the page
          <xref target="finer-grained"/>. target="finer-grained" format="default"/>. This could lead to compromise of a call
          even if the parent page is safe. Note: this This issue is not restricted
          to PAGES which contain untrusted content.

<!-- [rfced] Section 4.1.4:  Is "PAGES" capped for emphasis, or
should it be "pages"?  (We see "a page" and "the page" used in nearby
text in this section.)  If emphasis is desired, perhaps we could use
the <strong> element (Section 2.50 of RFC 7991) here.

Original:
 Note: this issue is not restricted to PAGES
 which contain untrusted content. -->

 If any page from a
          given origin ever loads JavaScript from an attacker, then it is
          possible for that attacker to infect the browser's notion of that
          origin semi-permanently.
          </t>
        </section>
      </section>
      <section title="Communications anchor="sec.rtc-comm-consent" numbered="true" toc="default">
        <name>Communications Consent Verification" anchor="sec.rtc-comm-consent"> Verification</name>
        <t>
          As discussed in <xref target="sec.cors-etc"/>, target="sec.cors-etc" format="default"/>, allowing web Web applications unrestricted network access
          via the browser introduces the risk of using the browser as an attack platform against
          machines which would not otherwise be accessible to the malicious site,
	  site -- for
          instance
          instance, because they are topologically restricted (e.g., behind a firewall or NAT).
          In order to prevent this form of attack as well as cross-protocol attacks attacks, it is
          important to require that the target of traffic explicitly consent to receiving
          the traffic in question. Until that consent has been verified for a given endpoint,
          traffic other than the consent handshake MUST NOT <bcp14>MUST NOT</bcp14> be sent to that endpoint.
        </t>
        <t>
          Note that consent verification is not sufficient to prevent overuse of
          network resources. Because WebRTC allows for a Web site to create
          data flows between two browser instances without user consent, it is
          possible for a malicious site to chew up a significant amount of a user's
            bandwidth without incurring significant costs to themselves by setting
          up such a channel to another user. However, as a practical matter
          there are a large number of Web sites which can act as data sources,
          so an attacker can at least use downlink bandwidth with existing
          Web APIs. However, this potential DoS vector reinforces the need
          for adequate congestion control for WebRTC protocols to ensure that
          they play fair with other demands on the user's bandwidth.
        </t>
        <section title="ICE" anchor="sec.ice"> anchor="sec.ice" numbered="true" toc="default">
          <name>ICE</name>
          <t>
          Verifying receiver consent requires some sort of explicit handshake, but conveniently
          we already need one in order to do NAT hole-punching. Interactive Connectivity Establishment (ICE) <xref target="RFC8445" format="default"/> includes a handshake
          designed to verify that the receiving element wishes to receive traffic from the
          sender. It
          is important to remember here that the site initiating ICE is
          presumed malicious; in order for the handshake to be secure, the
          receiving element <bcp14>MUST</bcp14> demonstrate receipt/knowledge of some value
          not available to the site (thus preventing the site from forging
          responses).  In order to achieve this objective with ICE, the
          Session Traversal Utilities for NAT (STUN)
          transaction IDs must be generated by the browser and <bcp14>MUST NOT</bcp14> be made
          available to the initiating script, even via a diagnostic interface.
          Verifying receiver consent also requires verifying the receiver wants
          to receive traffic from a particular sender, and at this time; for
          example, a malicious site may simply attempt ICE to known servers
          that are using ICE for other sessions.  ICE provides this verification
          as well, by using the STUN credentials as a form of per-session shared
          secret.  Those credentials are known to the Web application, but would
          need to also be known and used by the STUN-receiving element to be useful.
          </t>
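          <t>
            The following conceptual sketch (not a STUN implementation;
            attribute layout and padding are omitted) illustrates the two
            properties this text relies on: the transaction ID is drawn from
            browser-internal randomness that the script never sees, and a
            check response is only accepted if it echoes that ID and carries
            an HMAC computed with the per-session ICE password.
          </t>
          <sourcecode type="typescript"><![CDATA[
// Conceptual sketch of the browser-side consent check for one candidate
// pair.  Real STUN/ICE processing is considerably more involved.
interface PendingCheck {
  transactionId: Uint8Array; // 96-bit ID, never exposed to the script
  icePassword: string;       // per-session shared secret from signaling
}

function newCheck(icePassword: string): PendingCheck {
  const transactionId = new Uint8Array(12);
  crypto.getRandomValues(transactionId); // browser-internal randomness
  return { transactionId, icePassword };
}

async function responseIsAcceptable(
  check: PendingCheck,
  echoedId: Uint8Array,
  coveredBytes: Uint8Array, // the portion of the response the HMAC covers
  receivedHmac: Uint8Array,
): Promise<boolean> {
  // 1. The responder must echo the unguessable transaction ID.
  if (echoedId.length !== check.transactionId.length ||
      !echoedId.every((b, i) => b === check.transactionId[i])) {
    return false;
  }
  // 2. The responder must prove knowledge of the ICE password
  //    (MESSAGE-INTEGRITY is HMAC-SHA1 in real STUN).
  const key = await crypto.subtle.importKey(
    "raw", new TextEncoder().encode(check.icePassword),
    { name: "HMAC", hash: "SHA-1" }, false, ["verify"]);
  return crypto.subtle.verify("HMAC", key, receivedHmac, coveredBytes);
}
]]></sourcecode>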
          <t>
            There also needs to be some mechanism for the browser to verify that
            the target of the traffic continues to wish to receive it. Because ICE keepalives are
            indications, they will not work here.
            <xref target="RFC7675"/> target="RFC7675" format="default"/> describes the mechanism
            for providing consent freshness.
          </t>
        </section>
        <section title="Masking" anchor="sec.masking"> anchor="sec.masking" numbered="true" toc="default">
          <name>Masking</name>
          <t>
            Once consent is verified, there still is some concern about misinterpretation
            attacks as described by Huang et al. <xref target="huang-w2sp" format="default"/>.
            Where TCP is used, the risk is substantial due to the potential
            presence of transparent proxies; therefore, if TCP is to be used,
            then WebSockets-style masking <bcp14>MUST</bcp14> be employed.
          </t>
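          <t>
            For illustration, WebSockets-style masking is simply an XOR of the
            payload with a fresh, browser-chosen 32-bit key per frame, as
            sketched below; because the script never controls the key, it
            cannot choose the bytes that appear on the wire.
          </t>
          <sourcecode type="typescript"><![CDATA[
// Sketch of RFC 6455-style payload masking (framing details omitted).
function maskPayload(payload: Uint8Array): {
  maskingKey: Uint8Array;
  masked: Uint8Array;
} {
  const maskingKey = new Uint8Array(4);
  crypto.getRandomValues(maskingKey); // fresh, browser-chosen key per frame

  const masked = new Uint8Array(payload.length);
  for (let i = 0; i < payload.length; i++) {
    masked[i] = payload[i] ^ maskingKey[i % 4];
  }
  return { maskingKey, masked };
}
]]></sourcecode>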
          <t>
            Since DTLS (with the anti-chosen plaintext mechanisms required by
            TLS 1.1) does not allow the attacker to generate predictable
            ciphertext, there is no need for masking of protocols running over
            DTLS (e.g., SCTP over DTLS, UDP over DTLS, etc.).
          </t>
          <t>
            Note that in principle an attacker could exert some control
            over Secure Real-time Transport Protocol (SRTP) packets by using a combination of the WebAudio API
            and extremely tight timing control.
            The primary risk here seems to be carriage of SRTP over Traversal
            Using Relays around NAT (TURN) TCP.
            However, as SRTP packets have an extremely characteristic packet
            header, it seems unlikely that any but the most aggressive
            intermediaries would be confused into thinking that another
            application-layer protocol was in use.
          </t>
        </section>
        <section title="Backward Compatibility"> numbered="true" toc="default">
          <name>Backward Compatibility</name>
          <t>
            A requirement to use ICE limits compatibility with legacy non-ICE clients.
            It seems unsafe to completely remove the requirement for some check.
            All proposed checks have the common feature that the browser
            sends some message to the candidate traffic recipient
            and refuses to send other traffic until that message has been
            replied to. The message/reply pair must be generated in such
            a way that an attacker who controls the Web application
            cannot forge them, generally by having the message contain some
            secret value that must be incorporated (e.g., echoed, hashed into,
            etc.). Non-ICE candidates for this role (in cases where the
            legacy endpoint has a public address) include:
          </t>
          <ul spacing="normal">
            <li>STUN checks without using ICE (i.e., the non-RTC-web endpoint sets up a STUN responder).</li>
            <li>Use of the RTP Control Protocol (RTCP) as an implicit reachability check.</li>
          </ul>
          <t>
            In the RTCP approach, the WebRTC endpoint is allowed to send
            a limited number of RTP packets prior to receiving consent. This
            allows a short window of attack. In addition, some legacy endpoints
            do not support RTCP, so this is a much more expensive solution for
            such endpoints, for which it would likely be easier to implement ICE.
            For these two reasons, an RTCP-based approach does not seem to
            address the security issue satisfactorily.
          </t>
          <t>
            In the STUN approach, the WebRTC endpoint is able to verify that
            the recipient is running some kind of STUN endpoint but unless
            the STUN responder is integrated with the ICE username/password
            establishment system, the WebRTC endpoint cannot verify that
            the recipient consents to this particular call. This may be an
            issue if existing STUN servers are operated at addresses that
            are not able to handle bandwidth-based attacks. Thus, this
            approach does not seem satisfactory either.
          </t>
          <t>
            If the systems are tightly integrated (i.e., the STUN endpoint responds with
            responses authenticated with ICE credentials), then this issue
            does not exist. However, such a design is very close to an ICE-Lite
            implementation (indeed, arguably is one).
            An intermediate approach would be to have a STUN extension that indicated
            that one was responding to WebRTC checks but not computing
            integrity checks based on the ICE credentials. This would allow the
            use of standalone STUN servers without the risk of confusing them
            with legacy STUN servers. If a non-ICE legacy solution is needed,
            then this is probably the best choice.
          </t>
          <t>
            Once initial consent is verified, we also need to verify continuing
            consent, in order to avoid attacks where two people briefly share
            an IP (e.g., behind a NAT in an Internet cafe) and the attacker
            arranges for a large, unstoppable traffic flow to the
            network and then leaves. The appropriate technologies here are
            fairly similar to those for initial consent, though are perhaps
            weaker since the threats are less severe.
          </t>
        </section>
        <section title="IP anchor="sec.ip.location" numbered="true" toc="default">
          <name>IP Location Privacy" anchor="sec.ip.location"> Privacy</name>
          <t>
            Note that as soon as the callee sends their ICE candidates, the
            caller learns the callee's IP addresses. The callee's server-reflexive
            address reveals a lot of information about the callee's location.

            In order to avoid tracking, implementations may wish to suppress
            the start of ICE negotiation until the callee has answered. In
            addition, either side may wish to hide their location from the other
            side entirely by forcing all traffic through a TURN server.
          </t>
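          <t>
            The sketch below shows the standard configuration hook for the
            latter approach: setting the ICE transport policy to "relay"
            causes the browser to offer only TURN-relayed candidates, so the
            peer learns the TURN server's address rather than the user's. The
            TURN URI and credentials shown are placeholders.
          </t>
          <sourcecode type="typescript"><![CDATA[
// Sketch: force all media through a TURN relay to avoid exposing
// host and server-reflexive addresses to the peer.
const pc = new RTCPeerConnection({
  iceServers: [{
    urls: "turns:turn.example.com:5349",   // placeholder relay
    username: "placeholder-user",
    credential: "placeholder-password",
  }],
  iceTransportPolicy: "relay",             // suppress non-relay candidates
});
]]></sourcecode>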
          <t>
            In ordinary operation, the site learns the browser's IP address,
            though it may be hidden via mechanisms like Tor <eref
            brackets="angle" target="https://www.torproject.org"/> or a VPN.
            However, because sites can cause the browser to provide
            IP addresses, this provides a mechanism for sites to learn
            about the user's network environment even if the user is behind
            a VPN that masks their IP address. Implementations may wish
            to provide settings which suppress all non-VPN candidates if
            the user is on certain kinds of VPN, especially privacy-oriented
            systems such as Tor. See <xref target="RFC8828" format="default"/>
            for additional information.
          </t>
        </section>
      </section>
      <section title="Communications Security" anchor="sec.rtc-comsec"> anchor="sec.rtc-comsec" numbered="true" toc="default">
        <name>Communications Security</name>
        <t>
          Finally, we consider a problem familiar from the SIP world: communications security.
          For obvious reasons, it <bcp14>MUST</bcp14> be possible for the communicating parties to establish
          a channel which is secure against both message recovery and message modification.
          (See <xref target="RFC5479"/> target="RFC5479" format="default"/> for more details.)
          This service must be provided for both data and voice/video.
          Ideally the same security mechanisms would be used for both types of content.
          Technology for providing this
          service (for instance, SRTP <xref target="RFC3711" format="default"/>, DTLS <xref target="RFC6347" format="default"/>, and
          DTLS-SRTP <xref target="RFC5763" format="default"/>) is well understood. However, we must
          examine this technology in the WebRTC context, where the threat
          model is somewhat different.
        </t>
        <t>
          In general, it is important to understand that unlike a conventional SIP proxy,
          the calling service (i.e., the Web server) controls not only the channel
          between the communicating endpoints but also the application running on
          the user's browser.
          While in principle it is possible for the browser to cut the calling service
          out of the loop and directly present trusted information (and perhaps get
          consent), practice in modern browsers is to avoid this whenever possible.
          "In-flow"
          "In&nbhy;flow" modal dialogs which require the user to consent to specific
          actions are particularly disfavored as human factors research indicates
          that unless they are made extremely invasive, users simply agree to
          them without actually consciously giving consent <xref target="abarth-rtcweb" format="default"/>.
          Thus, nearly all the UI will necessarily be rendered by the
          browser but under control of the calling service. This likely includes the
          peer's identity information, which, after all, is only meaningful in
          the context of some calling service.
        </t>
        <t>
          This limitation does not mean that preventing attack by the calling service
          is completely hopeless. However, we need to distinguish between two
          classes of attack:
        </t>
        <t><list style="hanging">
          <t hangText="Retrospective
        <dl newline="true" spacing="normal">
          <dt>Retrospective compromise of calling service."></t><t>The service:</dt>
          <dd>The calling service
          is non-malicious during a call but subsequently is compromised and wishes to
          attack an older call (often called a "passive attack")</t>
          <t></t>
          <t hangText="During-call attack").</dd>
          <dt>During-call attack by calling service."></t><t>The service:</dt>
          <dd>The calling service is compromised
          during the call it wishes to attack (often called an "active attack").</t>
          </list>
          </t> attack").</dd>
        </dl>
        <t>
          Providing security against the former type of attack is practical using the
          techniques discussed in <xref target="sec.retrospective-compromise" format="default"/>.
          However, it is extremely difficult to prevent a
          trusted but malicious calling service from actively attacking a user's calls,
          either by mounting a Man-in-the-Middle (MITM) attack or by diverting them entirely.
          (Note that this attack applies equally to a network attacker if communications
          to the calling service are not secured.) We discuss some potential approaches
          and why they are likely to be impractical in <xref target="sec.during-attack" format="default"/>.
        </t>
        <section title="Protecting Against anchor="sec.retrospective-compromise" numbered="true" toc="default">
          <name>Protecting against Retrospective Compromise" anchor="sec.retrospective-compromise"> Compromise</name>
          <t>
            In a retrospective attack, the calling service was uncompromised during
            the call, but an attacker subsequently wants to recover the content of the
            call. We assume that the attacker has access to the protected media stream
            as well as having full control of the calling service.
          </t>
          <t>
            If the calling service has access to the traffic keying material
            (as in SDES <xref target="RFC4568"/>), target="RFC4568" format="default"/>), then retrospective attack
            is trivial.

<!-- [rfced] Section 4.3.1:  We would like to expand "SDES" for ease
of the reader.  Does SDES refer to "Security Description" here, or perhaps "Source Description" per
several other documents in this cluster (e.g., RFC-to-be 8852 <draft-ietf-avtext-rid>)?

Original:
 If the calling service has access to the traffic keying material (as
 in SDES [RFC4568]), then retrospective attack is trivial. -->

            This form of attack is particularly serious in the context of the Web because
            it is standard practice in Web services to run extensive logging and monitoring. Thus, it is highly
            likely that if the traffic key is part of any HTTP request it will be logged somewhere and thus
            subject to subsequent compromise. It is this consideration that makes an automatic, public key-based
            key exchange mechanism imperative for WebRTC (this is a good idea for any communications
            security system), and this mechanism <bcp14>SHOULD</bcp14> provide Perfect Forward Secrecy (PFS).
            The signaling channel / calling service can be used to authenticate this mechanism.
          </t>
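          <t>
            The following TypeScript sketch is purely illustrative and
            non-normative. It shows the browser-side view of this property:
            with DTLS-SRTP, the offer handed to the calling service carries
            only a certificate fingerprint, while the traffic keys are
            derived inside the DTLS handshake itself. The
            sendToCallingService() function is a hypothetical stand-in for
            whatever signaling the site uses.
          </t>
          <sourcecode type="typescript"><![CDATA[
// Illustrative sketch only. With DTLS-SRTP, the signaling channel
// carries a certificate fingerprint ("a=fingerprint:..."), never the
// SRTP traffic keys; contrast SDES, where "a=crypto:" lines in the
// SDP carry the keys and may end up in server logs.

// Hypothetical signaling function (assumption, not a real API).
declare function sendToCallingService(msg: object): void;

async function startCall(stream: MediaStream): Promise<void> {
  const pc = new RTCPeerConnection();
  stream.getTracks().forEach((t) => pc.addTrack(t, stream));

  const offer = await pc.createOffer();
  await pc.setLocalDescription(offer);

  // Only the fingerprint of the DTLS certificate is visible here;
  // the keys themselves are negotiated later, peer to peer.
  const fingerprints = (offer.sdp ?? "")
    .split("\r\n")
    .filter((line) => line.startsWith("a=fingerprint:"));
  console.log("visible to the calling service:", fingerprints);

  sendToCallingService({ type: "offer", sdp: offer.sdp });
}
]]></sourcecode>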
          <t>
            In addition, if end-to-end keying is used,
            the system <bcp14>MUST NOT</bcp14> provide any APIs either to extract long-term
            keying material or to directly access any stored traffic keys.

<!-- [rfced] Section 4.3.1:  To what does "either" refer in this
sentence?

Original:
 In addition, if end-to-end keying is in used, the system MUST NOT
 provide any APIs to extract either long-term keying material or to
 directly access any stored traffic keys. -->

            Otherwise, an attacker who subsequently compromised the calling service
            might be able to use those APIs to recover the traffic keys and thus
            compromise the traffic.
          </t>
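          <t>
            As a non-normative sketch of what such an API surface looks like
            from the application's side: the certificate handle returned by
            the browser exposes an expiry time but no way to read the private
            key or any derived traffic keys.
          </t>
          <sourcecode type="typescript"><![CDATA[
// Illustrative sketch: the application gets an opaque certificate
// handle; the private key stays inside the browser.

async function makeEphemeralIdentity(): Promise<RTCPeerConnection> {
  const cert = await RTCPeerConnection.generateCertificate({
    name: "ECDSA",
    namedCurve: "P-256",
  } as EcKeyGenParams);

  // Readable metadata only; no key-export method exists.
  console.log("certificate expires at", new Date(cert.expires));

  // The certificate can be used for calls, but not extracted.
  return new RTCPeerConnection({ certificates: [cert] });
}
]]></sourcecode>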
        </section>
        <section title="Protecting Against anchor="sec.during-attack" numbered="true" toc="default">
          <name>Protecting against During-Call Attack" anchor="sec.during-attack"> Attack</name>
          <t>
            Protecting against attacks during a call is a more difficult proposition. Even
            if the calling service cannot directly access keying material (as recommended
            in the previous section), it can simply mount a MITM attack
            on the connection, telling Alice that she is calling Bob and Bob that
            he is calling Alice, while in fact the calling service is acting as
            a calling bridge and capturing all the traffic. Protecting against
            this form of attack requires positive authentication of the remote
            endpoint such as explicit out-of-band key verification (e.g., by a fingerprint)
            or a third-party identity service as described in
            <xref target="I-D.ietf-rtcweb-security-arch"/>. target="RFC8827" format="default"/>.
          </t>
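          <t>
            As a non-normative illustration of the out-of-band verification
            option, the sketch below extracts the DTLS fingerprints from the
            negotiated descriptions so that the two users could compare them
            over some channel the calling service does not control. How the
            values are displayed or exchanged is left entirely hypothetical.
          </t>
          <sourcecode type="typescript"><![CDATA[
// Illustrative sketch: collect the DTLS fingerprints for a call so
// the users can compare them out of band. This is a voluntary,
// manual check; nothing here is enforced by the browser.

function extractFingerprints(sdp: string): string[] {
  return sdp
    .split("\r\n")
    .filter((l) => l.startsWith("a=fingerprint:"))
    .map((l) => l.slice("a=fingerprint:".length));
}

function fingerprintsForComparison(pc: RTCPeerConnection): string[] {
  const local = pc.currentLocalDescription?.sdp ?? "";
  const remote = pc.currentRemoteDescription?.sdp ?? "";
  return [
    ...extractFingerprints(local),
    ...extractFingerprints(remote),
  ];
}
]]></sourcecode>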
          <section title="Key Continuity" anchor="sec.key-continuity"> anchor="sec.key-continuity" numbered="true" toc="default">
            <name>Key Continuity</name>
            <t>
              One natural approach is to use "key continuity". While a malicious
              calling service can present any identity it chooses to the user,
              it cannot produce a private key that maps to a given public key.
              Thus, it is possible for the browser to note a given user's
              public key and generate an alarm whenever that user's key
              changes. The Secure Shell (SSH) protocol <xref target="RFC4251" format="default"/> uses a similar technique.
              (Note that the need to avoid explicit user consent on every call
              precludes the browser requiring an immediate manual check of the peer's key.)
            </t>
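            <t>
              The following non-normative sketch shows the shape of such a
              check at the application layer; the identity label, the storage
              key, and the idea of doing this in site-provided script (rather
              than in the browser itself) are all assumptions made purely for
              illustration.
            </t>
            <sourcecode type="typescript"><![CDATA[
// Illustrative sketch of a key-continuity check in the style of
// SSH known_hosts: remember the fingerprint last seen for a claimed
// identity and flag any change. The text below explains why this is
// of limited value in practice.

const KEY_STORE = "webrtc-known-peers";  // hypothetical storage key

function checkKeyContinuity(
  identity: string,
  fingerprint: string
): "new" | "same" | "changed" {
  const store: Record<string, string> =
    JSON.parse(localStorage.getItem(KEY_STORE) ?? "{}");

  const previous = store[identity];
  if (previous === undefined) {
    store[identity] = fingerprint;
    localStorage.setItem(KEY_STORE, JSON.stringify(store));
    return "new";
  }
  return previous === fingerprint ? "same" : "changed";
}
]]></sourcecode>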
            <t>
              Unfortunately, this sort of key continuity mechanism is far less
              useful in the WebRTC context. First, much of the virtue of
              WebRTC (and any Web application) is that it is not bound to a
              particular piece of client software. Thus, it will be not only
              possible but routine for a user to use multiple browsers
              on different computers that will of course have different
              keying material (Securely Available Credentials (SACRED) <xref target="RFC3760" format="default"/> notwithstanding).
              Thus, users will frequently be alerted to key mismatches which
              are in fact completely legitimate, with the result that they
              are trained to simply click through them. As it is known that
              users routinely will click through far more dire warnings
              <xref target="cranor-wolf"/>, target="cranor-wolf" format="default"/>, it seems extremely unlikely that
              any key continuity mechanism will be effective rather than
              simply annoying.
            </t>
            <t>
              Moreover, it is trivial to bypass even this kind of mechanism.
              Recall that unlike the case of SSH, the browser never directly
              gets the peer's identity from the user. Rather, it is provided
              by the calling service. Even enabling a mechanism of this type
              would require an API to allow the calling service to tell the
              browser "this is a call to user X". X." All the calling service
              needs to do to avoid triggering a key continuity warning
              is to tell the browser that "this is a call to user Y"
              where Y is confusable with X.
              Even if the user actually checks the other side's name
              (which all available evidence indicates is unlikely),
              this would require (a) the browser to use the trusted UI
              to provide the name and (b) the user to not be fooled by
              similar appearing names.
            </t>
          </section>
          <section title="Short anchor="sec.sas" numbered="true" toc="default">
            <name>Short Authentication Strings" anchor="sec.sas"> Strings</name>
            <t>
              ZRTP <xref target="RFC6189"/> target="RFC6189" format="default"/> uses a "Short Authentication String" (SAS) which is derived
              from the key agreement protocol.

<!-- [rfced] Section 4.3.2.2:  Is the SAS always derived from the key
agreement protocol (in which case "(SAS), which is" would be correct)
or only sometimes derived from the key agreement protocol (in which
case "(SAS) that is" would be correct)?

Original:
 ZRTP [RFC6189] uses a "short authentication string" (SAS) which is
 derived from the key agreement protocol. -->

 This SAS is designed to be compared
              by the users (e.g., read aloud over the voice channel or
              transmitted via an out-of-band channel) and if confirmed by both sides precludes MITM
              attack. The intention is that the SAS is used once and then key
              continuity (though a different mechanism from that discussed
              above) is used thereafter.
            </t>
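            <t>
              For illustration only, the sketch below derives a short,
              human-comparable string from the two DTLS fingerprints of a
              call using the WebCrypto API. This is not the ZRTP SAS
              computation; it merely shows the kind of short value two users
              would read to each other.
            </t>
            <sourcecode type="typescript"><![CDATA[
// Illustrative sketch only; NOT the ZRTP SAS derivation. Produces a
// four-hex-digit value both sides can compute and read aloud.

async function shortAuthString(
  localFp: string,
  remoteFp: string
): Promise<string> {
  // Sort so both endpoints compute the same value.
  const input = [localFp, remoteFp].sort().join("|");
  const digest = await crypto.subtle.digest(
    "SHA-256",
    new TextEncoder().encode(input)
  );
  // Render the first two bytes as hex, e.g., "3fa9".
  return Array.from(new Uint8Array(digest).slice(0, 2))
    .map((b) => b.toString(16).padStart(2, "0"))
    .join("");
}
]]></sourcecode>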

<!-- [rfced] Section 4.3.2.2:  Should "though a different mechanism"
be "through a different mechanism" or "although using a different
mechanism" here?

Original:
 The intention is that the SAS is used
 once and then key continuity (though a different mechanism from that
 discussed above) is used thereafter. -->

            <t>
              Unfortunately, the SAS does not offer a practical solution to the
              problem of a compromised calling service. "Voice conversion" systems, which modify
              voice from one speaker to make it sound like another,
              are an active area of research.
              These systems are already good enough to fool both
              automatic recognition systems <xref target="farrus-conversion" format="default"/> and
              humans <xref target="kain-conversion" format="default"/> in many cases, and are of course likely
              to improve in the future, especially in an environment where the user just wants
              to get on with the phone call.
              Thus, even if the SAS is effective today, it is likely not to be so for much longer.
            </t>
            <t>
              Additionally, it is unclear that users will actually use an SAS.
              As discussed above, the browser UI constraints preclude requiring
              the SAS exchange prior to completing the call and so it must be
              voluntary; at most the browser will provide some UI indicator that the
              SAS has not yet been checked. However, it is well known that when
              faced with optional security mechanisms, many users simply
              ignore them <xref target="whitten-johnny" format="default"/>.
            </t>
            <t>
              Once users have checked the SAS once, key continuity
              is required to avoid them needing to check it on every call.
              However, this is problematic for reasons indicated in
              <xref target="sec.key-continuity"/>. target="sec.key-continuity" format="default"/>.
              In principle it is of course possible to render a different
              UI element to indicate that calls are using an unauthenticated
              set of keying material (recall that the attacker can just present
              a slightly different name so that the attack shows the
              same UI as a call to a new device or to someone you haven't
              called before), but as a practical matter, users simply ignore
              such indicators even in the rather more dire case of mixed
              content warnings.
            </t>
          </section>
          <section title="Third Party Identity" anchor="sec.third-party-id"> anchor="sec.third-party-id" numbered="true" toc="default">
            <name>Third-Party Identity</name>
            <t>
              The conventional approach to providing communications identity
              has of course been to have some third-party identity system
              (e.g., PKI) to authenticate the endpoints. Such mechanisms
              have proven to be too cumbersome for use by typical users
              (and nearly too cumbersome for administrators).
              However,
              a new generation of Web-based identity providers (BrowserID, Federated Google Login,
              Facebook Connect, OAuth <xref target="RFC6749" format="default"/>, OpenID <xref target="OpenID" format="default"/>, WebFinger <xref target="RFC7033" format="default"/>) has recently been developed
              and uses Web technologies to provide lightweight (from the user's
              perspective) third-party authenticated transactions.
              It is possible to use systems of this type to authenticate WebRTC calls,
              linking them to existing user notions of identity
              (e.g., Facebook adjacencies). Specifically, the third-party
              identity system is used to bind the user's identity to
              cryptographic keying material which is then used to
              authenticate the calling endpoints.
              Calls which are authenticated
              in this fashion are naturally resistant even to active MITM attack
              by the calling site.
            </t>
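            <t>
              The sketch below illustrates the binding described above. The
              identity-provider function is declared but not defined; it is
              an assumption standing in for whatever assertion format and
              verification procedure a real identity system would use.
            </t>
            <sourcecode type="typescript"><![CDATA[
// Illustrative sketch: an identity provider signs the caller's
// identity together with the DTLS fingerprint that authenticates
// the media; the callee checks both before trusting the call. The
// IdP function is hypothetical, not a real API.

interface IdentityAssertion {
  identity: string;    // e.g., "alice@idp.example"
  fingerprint: string; // DTLS certificate fingerprint
  signature: string;   // IdP signature over the fields above
}

declare function idpVerify(a: IdentityAssertion): Promise<boolean>;

async function verifyPeer(
  assertion: IdentityAssertion,
  observedFingerprint: string
): Promise<boolean> {
  // Even a malicious calling service cannot forge the IdP's
  // signature or produce a key matching someone else's fingerprint.
  return (
    (await idpVerify(assertion)) &&
    assertion.fingerprint === observedFingerprint
  );
}
]]></sourcecode>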
            <t>
              Note that there is one special case in which PKI-style certificates
              do provide a practical solution: calls from end users to
              large sites. For instance, if you are making a call
              to Amazon.com, then Amazon can easily get a certificate
              to authenticate their media traffic, just as they get
              one to authenticate their Web traffic. This does not provide
              additional security value in cases in which the calling site
              and the media peer are one and the same, but might be useful
              in cases in which third parties (e.g., ad networks or
              retailers) arrange for calls but do not participate in them.
            </t>
          </section>
          <section title="Page anchor="sec.page-access" numbered="true" toc="default">
            <name>Page Access to Media" anchor="sec.page-access"> Media</name>
            <t>
              Verifying the identity of the far media endpoint is a
              necessary but not sufficient condition for providing media
              security. In WebRTC, media flows are rendered into
              HTML5 MediaStreams which can be manipulated by the calling
              site. Obviously, if the site can modify or view the media,
              then the user is not getting the level of assurance they
              would expect from being able to authenticate their peer.
              In many cases, this is acceptable because the user values
              site-based special effects over complete security from the
              site. However, there are also cases where users wish to
              know that the site cannot interfere. In order to facilitate
              that, it will be necessary to provide features whereby
              the site can verifiably give up access to the media streams.
              This verification must be possible both from the local
              side and the remote side. That is, users must be able to verify
              that the person called has engaged a secure media
              mode (see <xref target="sec.malicious"/>). target="sec.malicious" format="default"/>). In order to achieve this this, it will be necessary to
              cryptographically bind an indication of the local media
              access policy into the cryptographic authentication
              procedures detailed in the previous sections.
            </t>
            <t>
              It should be noted that the use of this secure media mode is
              left to the discretion of the site. When such a mode is
              engaged, the browser will need to provide indicia to the user
              that the associated media has been authenticated as coming from
              the identified user.  This allows WebRTC services that wish to
              claim end-to-end security to do so in a way that can be easily
              verified by the user. This model requires that the remote
              party's browser be included in the TCB, as described in
              <xref target="sec.web-security"/>. target="sec.web-security" format="default"/>.
            </t>
          </section>
        </section>
        <section title="Malicious Peers" anchor="sec.malicious"> anchor="sec.malicious" numbered="true" toc="default">
          <name>Malicious Peers</name>
          <t>
            One class of attack that we do not generally try to prevent
            is malicious peers. For instance, no matter what confidentiality
            measures you employ, the person you are talking to might record
            the call and publish it on the Internet. Similarly, we do
            not attempt to prevent them from using voice or video processing
            technology to hide or change their appearance.
            While technologies (Digital Rights Management (DRM), etc.) do exist to attempt to address
            these issues, they are generally not compatible with open
            systems and WebRTC does not address them.

<!-- [rfced] Section 4.3.3:  We found this sentence confusing.
Does "from using voice or video processing technology from hiding
or changing their appearance" mean "from using voice or video
processing technology to hide or change their appearance," or
something else?

Also, for ease of the reader, we expanded "DRM" as "Digital Rights
Management."  Please let us know if this is incorrect.

Original:
 Similarly, we do not attempt to
 prevent them from using voice or video processing technology from
 hiding or changing their appearance.  While technologies (DRM, etc.)
 do exist to attempt to address these issues, they are generally not
 compatible with open systems and WebRTC does not address them.

Currently:
 ...  While technologies (Digital
 Rights Management (DRM), etc.) do exist to attempt to address these
 issues, they are generally not compatible with open systems and
 WebRTC does not address them. -->

          </t>
          <t>
            Similarly, we make no attempt to prevent prank calling or
            other unwanted calls. In general, this is within the scope of the
            calling site, though because WebRTC does offer some forms of
            strong authentication, that may be useful as part of a defense
            against such attacks.
          </t>
        </section>
      </section>
      <section title="Privacy Considerations" anchor="sec.privacy"> anchor="sec.privacy" numbered="true" toc="default">
        <name>Privacy Considerations</name>
        <section title="Correlation numbered="true" toc="default">
          <name>Correlation of Anonymous Calls"> Calls</name>
          <t>
            While persistent endpoint identifiers can be a useful security
            feature (see <xref target="sec.key-continuity" format="default"/>), they can
            also represent a privacy threat in settings where the user
            wishes to be anonymous. WebRTC provides a number of possible
            persistent identifiers such as DTLS certificates
            (if they are reused between connections) and RTCP CNAMEs
            (if generated according to <xref target="RFC6222" format="default"/> rather
            than the privacy-preserving mode of <xref target="RFC7022" format="default"/>).
            In order to prevent this type of correlation, browsers need to
            provide mechanisms to reset these identifiers (e.g., with the
            same lifetime as cookies). Moreover, the API should provide
            mechanisms to allow sites intended for anonymous calling
            to force the minting of fresh identifiers. In addition,
            IP addresses can be a source of call linkage
            <xref target="I-D.ietf-rtcweb-ip-handling"/>. target="RFC8828" format="default"/>.
          </t>
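          <t>
            As a non-normative sketch of the "fresh identifiers" point, a
            site intended for anonymous calling could mint a new DTLS
            certificate for every call rather than reusing one, so that the
            certificate fingerprint cannot be used to link calls. (This
            addresses only one linkage vector; IP addresses and RTCP CNAMEs
            are separate concerns.)
          </t>
          <sourcecode type="typescript"><![CDATA[
// Illustrative sketch: create a throwaway certificate per call so
// its fingerprint cannot correlate this call with any other.

async function anonymousConnection(): Promise<RTCPeerConnection> {
  const cert = await RTCPeerConnection.generateCertificate({
    name: "ECDSA",
    namedCurve: "P-256",
  } as EcKeyGenParams);
  // Do not persist `cert`; let it expire with this call.
  return new RTCPeerConnection({ certificates: [cert] });
}
]]></sourcecode>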
        </section>
        <section title="Browser Fingerprinting"> numbered="true" toc="default">
          <name>Browser Fingerprinting</name>
          <t>
            Any new set of API features adds a risk of browser fingerprinting,
            and WebRTC is no exception. Specifically, sites can use the
            presence or absence of specific devices as a browser fingerprint.
            In general, the API needs to be balanced between functionality
            and the incremental fingerprint risk. See <xref target="Fingerprinting" format="default"/>.
          </t>
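          <t>
            The following non-normative sketch shows the fingerprinting
            surface in question: the list of media devices (and, once
            permission has been granted, their labels) can be hashed into a
            fairly stable identifier for the browser.
          </t>
          <sourcecode type="typescript"><![CDATA[
// Illustrative sketch: hash the enumerated media devices into an
// identifier. Device labels are only populated after the user has
// granted media permission to the origin.

async function deviceFingerprint(): Promise<string> {
  const devices = await navigator.mediaDevices.enumerateDevices();
  const summary = devices
    .map((d) => `${d.kind}:${d.label}`)
    .sort()
    .join(";");
  const digest = await crypto.subtle.digest(
    "SHA-256",
    new TextEncoder().encode(summary)
  );
  return Array.from(new Uint8Array(digest))
    .map((b) => b.toString(16).padStart(2, "0"))
    .join("");
}
]]></sourcecode>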
        </section>
      </section>
    </section>
    <section title="Security Considerations" anchor="sec.sec_cons"> anchor="sec.sec_cons" numbered="true" toc="default">
      <name>Security Considerations</name>
      <t>This entire document is about security.</t>
    </section>
    <section title="Acknowledgements">
          <t>
            Bernard Aboba, Harald Alvestrand, Dan Druta,
            Cullen Jennings, Alan Johnston, Hadriel Kaplan (S 4.2.1), Matthew Kaufman,
            Martin Thomson, Magnus Westerlund.
          </t>
      <t></t>
    </section>

    <section title="IANA Considerations">
       <t>There are numbered="true" toc="default">
      <name>IANA Considerations</name>
      <t>This document has no IANA considerations.</t>
     </section>

    <section title="Changes Since -04">
      <t>
        <list style="symbols">
          <t>Replaced RTCWEB and RTC-Web with WebRTC, except when referring to the IETF WG</t>
          <t>Removed discussion of the IFRAMEd advertisement case, since we decided not to
          treat it specially.</t>
          <t>Added a privacy section considerations section.</t>
          <t>Significant edits to the SAS section to reflect Alan Johnston's comments.</t>
          <t>Added some discussion if IP location privacy and Tor.</t>
          <t>Updated the "communications consent" section to reflrect draft-ietf.</t>
          <t>Added a section about "malicious peers".</t>
          <t>Added a section describing screen sharing threats.</t>
          <t>Assorted editorial changes.</t>
        </list>
      </t> actions.</t>
    </section>
  </middle>
  <back>

    <references title="Normative References">
      &RFC2119;
      &RFC8174;
    <references>
      <name>References</name>
      <references>
        <name>Normative References</name>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.2119.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.8174.xml"/>
      </references>
    <references title="Informative References">
      &RFC3261;
      &RFC3552;
      &RFC3711;
      &RFC2818;
      &RFC5479;
      &RFC5763;
      &RFC6347;
      &RFC4568;
      &RFC4251;
      &RFC3760;
      &RFC6189;
      &RFC8445;
      &RFC6222;
      &RFC6454;
      &RFC6455;
      &RFC6749;
      &RFC7022;
      &RFC7033;
      &RFC7675;
      &I-D.ietf-rtcweb-security-arch;
      &I-D.ietf-rtcweb-ip-handling;
      &I-D.ietf-rtcweb-overview;
      <references>
        <name>Informative References</name>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.3261.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.3552.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.3711.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.2818.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.5479.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.5763.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6347.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.4568.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.4251.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.3760.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6189.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.8445.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6222.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6454.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6455.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6749.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.7022.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.7033.xml"/>
<xi:include href="https://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.7675.xml"/>

 <!-- draft-ietf-rtcweb-security-arch: RFC 8827 -->
 <reference anchor="RFC8827" target="https://www.rfc-editor.org/info/rfc8827">
   <front>
     <title>WebRTC Security Architecture</title>
     <author initials="E." surname="Rescorla" fullname="Eric Rescorla">
       <organization/>
     </author>
     <date month="October" year="2020"/>
   </front>
   <seriesInfo name="RFC" value="8827"/>
   <seriesInfo name="DOI" value="10.17487/RFC8827"/>
 </reference>

<!-- draft-ietf-rtcweb-ip-handling: RFC 8828 -->
<reference anchor="RFC8828" target="https://www.rfc-editor.org/info/rfc8828">
  <front>
    <title>WebRTC IP Address Handling Requirements</title>
    <author initials="J" surname="Uberti" fullname="Justin Uberti">
      <organization />
    </author>

    <date day="19" month="September" year="2010" month="October" year="2020" />
  </front>
  <seriesInfo name="" value="RTC-Web Workshop"/>
        <format target="http://rtc-web.alvestrand.com/home/papers/barth-security-prompt.pdf?attredirects=0" type="PDF"/> name="RFC" value="8828" />
  <seriesInfo name="DOI" value="10.17487/RFC8828"/>
</reference>

<!-- draft-ietf-rtcweb-overview: RFC 8825 -->
<reference anchor="whitten-johnny"> anchor="RFC8825" target="https://www.rfc-editor.org/info/rfc8825">
  <front>
          <title>Why Johnny Can't Encrypt: A Usability
    <title>Overview: Real-Time Protocols for Browser-Based Applications</title>
    <author initials="H." surname="Alvestrand" fullname="Harald T. Alvestrand">
      <organization />
    </author>
    <date month="October" year="2020" />
  </front>
  <seriesInfo name="RFC" value="8825" />
  <seriesInfo name="DOI" value="10.17487/RFC8825"/>
</reference>

        <reference anchor="abarth-rtcweb" target="http://rtc-web.alvestrand.com/home/papers/barth-security-prompt.pdf?attredirects=0">
          <front>
            <title>Prompting the user is security failure</title>
            <author initials="A." surname="Barth">
              <organization/>
            </author>
            <date month="September" year="2010"/>
          </front>
          <refcontent>RTC-Web Workshop</refcontent>
        </reference>

<!-- [rfced] Informative References:
The URL provided for [abarth-rtcweb] in the original XML file -
<http://rtc-web.alvestrand.com/home/papers/
barth-security-prompt.pdf?attredirects=0> - steers to
<https://672ad43e-a-6ea19bdf-s-sites.googlegroups.com/
a/alvestrand.com/rtc-web/home/papers/
barth-security-prompt.pdf?attachauth= ...(a very long string that is
different each time)...&attredirects=0>.

Is
<http://rtc-web.alvestrand.com/home/papers/barth-security-prompt.pdf?attredirects=0>
considered the most stable URL available?  Or, should the URL not be included
at all?

Original:
        <format
	    target="http://rtc-web.alvestrand.com/home/papers/barth-security-prompt.pdf?attredirects=0\
" type="PDF"/>
-->

        <reference anchor="whitten-johnny" target="https://www.usenix.org/legacy/publications/library/proceedings/sec99/whitten.html">
          <front>
            <title>Why Johnny Can't Encrypt: A Usability Evaluation of PGP 5.0</title>
            <author initials="A." surname="Whitten">
              <organization/>
            </author>
            <author initials="J.D." surname="Tygar">
              <organization/>
            </author>

          <!-- Date of USENIX Security Symposium -->
            <date month="August" year="1999" /> year="1999"/>
          </front>
        <seriesInfo name="" value="Proceedings
        <refcontent>Proceedings of the 8th USENIX Security Symposium, 1999"/> Symposium</refcontent>
        </reference>

        <reference anchor="cranor-wolf"> anchor="cranor-wolf" target="https://www.usenix.org/legacy/event/sec09/tech/full_papers/sunshine.pdf">
          <front>
            <title>Crying Wolf: An Empirical Study of SSL Warning Effectiveness</title>
            <author initials="J." surname="Sunshine">
              <organization/>
            </author>
            <author initials="S." surname="Egelman">
              <organization/>
            </author>
            <author initials="H." surname="Almuhimedi">
              <organization/>
            </author>
            <author initials="N." surname="Atri">
              <organization/>
            </author>
            <author initials="L." surname="cranor">
            <organization></organization> surname="Cranor">
              <organization/>
            </author>

          <!-- Date of USENIX Security Symposium -->
            <date month="August" year="2009" /> year="2009"/>
          </front>

        <seriesInfo name="" value="Proceedings
        <refcontent>Proceedings of the 18th USENIX Security Symposium, 2009"/> Symposium</refcontent>
        </reference>

        <reference anchor="kain-conversion">
          <front>
            <title>Design and Evaluation of a Voice Conversion Algorithm based
	    on Spectral Envelope Mapping and Residual Prediction</title>
            <author initials="A." surname="Kain">
              <organization/>
            </author>
            <author initials="M." surname="Macon">
              <organization/>
            </author>

          <!-- Date of ICASSP 2001 -->
            <date month="May" year="2001" /> year="2001"/>
          </front>
            <seriesInfo name="" value="Proceedings name="DOI" value="10.1109/ICASSP.2001.941039"/>
          <refcontent>Proceedings of the 2001 IEEE International Conference on
	  Acoustics, Speech, and Signal Processing (ICASSP)</refcontent>
        </reference>

<!-- [rfced] References:  May we update [kain-conversion] as follows?

Original:
 [kain-conversion]
            Kain, A. and M. Macon, "Design and Evaluation of a Voice
            Conversion Algorithm based on Spectral Envelope Mapping
            and Residual Prediction",  Proceedings of ICASSP, May 2001"/>
      </reference>
            2001, May 2001.

Currently (URL added during conversion to xml2rfc v3):
 [kain-conversion]
            Kain, A. and M. Macon, "Design and Evaluation of a Voice
            Conversion Algorithm based on Spectral Envelope Mapping
            and Residual Prediction", Proceedings of the 2001 IEEE
            International Conference on Acoustics, Speech, and Signal
            Processing (ICASSP), DOI 10.1109/ICASSP.2001.941039, May
            2001, <https://doi.org/10.1109/ICASSP.2001.941039>.

Suggested:
 [kain-conversion]
            Kain, A. and M. Macon, "Design and Evaluation of a Voice
            Conversion Algorithm based on Spectral Envelope Mapping
            and Residual Prediction", Proceedings of the 2001 IEEE
            International Conference on Acoustics, Speech, and Signal
            Processing (ICASSP), DOI 10.1109/ICASSP.2001.941039, May
            2001, <https://ieeexplore.ieee.org/document/941039>. -->

        <reference anchor="farus-conversion"> anchor="farrus-conversion">
          <front>
            <title>Speaker Recognition Robustness to Voice Conversion</title>
            <author initials="M." surname="Farrus">
              <organization/>
            </author>
            <author initials="D." surname="Erro">
              <organization/>
            </author>
            <author initials="J." surname="Hernando">
              <organization/>
            </author>

          <!-- Date from http://www.researchgate.net/publication/228819912 -->
            <date month="January" year="2008" /> year="2008"/>
          </front>
        </reference>

        <reference anchor="huang-w2sp">
          <front>
            <title>Talking to Yourself for Fun and Profit</title>
            <author initials="L-S." surname="Huang">
              <organization/>
            </author>
            <author initials="E.Y." surname="Chen">
              <organization/>
            </author>
            <author initials="A." surname="Barth">
              <organization/>
            </author>
            <author initials="E." surname="Rescorla">
              <organization/>
            </author>
            <author initials="C." surname="Jackson">
              <organization/>
            </author>

          <!-- Date from PDF properties -->
            <date month="May" year="2011" /> year="2011"/>
          </front>

        <seriesInfo name="" value="W2SP, 2011"/>
          <refcontent>Web 2.0 Security and Privacy (W2SP 2011)</refcontent>
        </reference>

        <reference anchor="finer-grained">
          <front>
            <title>Beware of Finer-Grained Origins</title>
            <author initials="A." surname="Barth">
            <organization></organization>
          </author>
          <author initials="C." surname="Jackson">
            <organization></organization>
              <organization/>
            </author>
            <author initials="A." surname="Barth">
              <organization/>
            </author>

          <!-- Date from PDF properties -->
            <date month="July" year="2008" /> year="2008"/>
          </front>

        <seriesInfo name="" value="W2SP, 2008"/>
          <refcontent>Web 2.0 Security and Privacy (W2SP 2008)</refcontent>
        </reference>

<!-- [rfced] Is the [CORS] reference still correct?  Should this document
instead refer to <https://fetch.spec.whatwg.org/>?  Perhaps
<https://fetch.spec.whatwg.org/#http-cors-protocol>, more specifically?

Original:
   [CORS]     van Kesteren, A., "Cross-Origin Resource Sharing", January
              2014.

When we search for this document, we find this link
<https://www.w3.org/TR/2009/WD-cors-20090317/>, which gives the following
warning:

   This version is outdated!
   For the latest version, please look at https://www.w3.org/TR/cors/.

<https://www.w3.org/TR/cors/> redirects to <https://fetch.spec.whatwg.org/>.

On <https://fetch.spec.whatwg.org/>, the reference for [CORS] refers back to
<https://www.w3.org/TR/cors/>:

[CORS]
    Anne van Kesteren. Cross-Origin Resource Sharing. 2 June 2020. REC. URL:
    https://www.w3.org/TR/cors/

<https://www.w3.org/TR/2020/SPSD-cors-20200602/> says that new implementations
should follow the "Fetch API Living Standard".

Please review and let us know if any updates are needed.
-->

        <reference anchor="CORS">
          <front>
            <title>Cross-Origin Resource Sharing</title>
            <author initials="A." surname="van Kesteren">
              <organization/>
            </author>

          <!-- Date from http://www.w3.org/TR/2014/REC-cors-20140116/ -->
            <date day="16" month="January" year="2014" /> year="2014"/>
          </front>
        <format target="http://www.w3.org/TR/cors/" type="TXT"/>
        </reference>

        <reference anchor="SWF"> anchor="SWF" target="http://www.adobe.com/content/dam/Adobe/en/devnet/swf/pdf/swf_file_format_spec_v10.pdf">
          <front>
            <title>SWF File Format Specification Version 19</title>

          <author surname="Adobe">
          <organization></organization>
          </author>

          <!-- Date from PDF properties -->
            <author/>
            <date day="23" month="April" year="2013" /> year="2013"/>
          </front>
        <format target="http://www.adobe.com/content/dam/Adobe/en/devnet/swf/pdf/swf_file_format_spec_v10.pdf" type="PDF"/>
        </reference>

<!-- [rfced] Informative References:
The URL provided for [SWF] in the original XML file -
<http://www.adobe.com/content/dam/Adobe/en/devnet/swf/pdf/
swf_file_format_spec_v10.pdf> - steers to
<https://www.adobe.com/content/dam/acom/en/devnet/swf/pdf/
swf_file_format_spec_v10.pdf>, which in turn yields a 404.
Please provide a working and stable URL.

Original:
 [SWF]      "SWF File Format Specification Version 19", April 2013. -->

        <reference anchor="XmlHttpRequest"> anchor="XmlHttpRequest" target="https://www.w3.org/TR/XMLHttpRequest/">
          <front>
            <title>XMLHttpRequest Level 2</title>
            <author initials="A." surname="van Kesteren">
              <organization/>
            </author>
            <date day="17" month="January" year="2012"/>
          </front>
          <format target="http://www.w3.org/TR/XMLHttpRequest/" type="TXT"/>
        </reference>

<!-- [rfced] Informative References:  The URL as provided for
[XmlHttpRequest] in the original document -
<http://www.w3.org/TR/XMLHttpRequest/> - steers to a page with the
title "XMLHttpRequest Level 1," dated October 2016.  When we did a
Google search for "XMLHttpRequest Level 2," we found
<https://www.w3.org/TR/2012/WD-XMLHttpRequest-20120117/>, which is
partially obscured by a red box that says "This version is
outdated!"  The link in the box in turn steers to the October 2016
"XMLHttpRequest Level 1" page.

Please advise.

Original:
 [XmlHttpRequest]
            van Kesteren, A., "XMLHttpRequest Level 2", January 2012. -->

        <reference anchor="Fingerprinting"> anchor="Fingerprinting" target="https://www.w3.org/TR/fingerprinting-guidance/#acknowledgement/">
          <front>
            <title>Fingerprinting Guidance for Web Specification Authors (Draft)</title>

           <author surname="W3C">
               <organization></organization>
           </author>
            <author/>
            <date day="24" month="November" year="2013" /> year="2013"/>
          </front>
         <format target="https://www.w3.org/TR/fingerprinting-guidance/#acknowledgement/" type="TXT"/>
        </reference>

<!-- [rfced] Informative References:  The URL provided for
[Fingerprinting] in the original XML file -
<https://www.w3.org/TR/fingerprinting-guidance/#acknowledgement/> -
steers to a document with the title "Mitigating Browser
Fingerprinting in Web Specifications."  Should the reference be updated?  If
not, please provide the correct URL for the document listed below - perhaps
<https://www.w3.org/standards/history/fingerprinting-guidance> or
<https://www.w3.org/TR/fingerprinting-guidance/>?

Original:
 [Fingerprinting]
            "Fingerprinting Guidance for Web Specification Authors
            (Draft)", November 2013. -->

        <reference anchor="OpenID"> anchor="OpenID" target="https://openid.net/specs/openid-connect-core-1_0.html">
          <front>
            <title>OpenID Connect Core 1.0</title>
            <author initials="N." surname="Sakimura">
              <organization/>
            </author>
            <author initials="J." surname="Bradley">
              <organization/>
            </author>
            <author initials="M." surname="Jones">
              <organization/>
            </author>
            <author initials="B." surname="de Medeiros">
              <organization/>
            </author>
            <author initials="C." surname="Mortimore">
              <organization/>
            </author>
            <date day="8" month="November" year="2014" /> year="2014"/>
          </front>
          <format target="https://openid.net/specs/openid-connect-core-1_0.html/" type="HTML"/>
        </reference>
      </references>
    </references>
    <section numbered="false" toc="default">
      <name>Acknowledgements</name>
      <t>
            <contact fullname="Bernard Aboba"/>, <contact fullname="Harald
	    Alvestrand"/>, <contact fullname="Dan Druta"/>,
            <contact fullname="Cullen Jennings"/>, <contact fullname="Alan
	    Johnston"/>, <contact fullname="Hadriel Kaplan"/> (<xref
	    target="sec.ice"/>), <contact fullname="Matthew Kaufman"/>,
            <contact fullname="Martin Thomson"/>, <contact fullname="Magnus Westerlund"/>.
      </t>

    </section>
  </back>

<!-- [rfced] Please let us know how/if the following should be made
consistent:

 iframe / IFRAME (also per draft-ietf-rtcweb-security-arch)

 calling-service.example.com/ vs. calling-service.example.com
   (We also see "http://anything.example.org/".)

 CROSS-PROTOCOL attacks (Section 3.3) /
   cross-protocol attacks (Section 4.2)   Is the capitalization
     supposed to indicate emphasis? -->

</rfc>