<!DOCTYPE html>
<html lang="en">
  <head><meta http-equiv="Cache-Control" content="no-transform" /><meta http-equiv="Cache-Control" content="no-siteapp" /><meta name="MobileOptimized" content="width" /><meta name="HandheldFriendly" content="true" /><script>var V_PATH="/";</script>
    
    <meta charset="utf-8" >
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1" />

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <title>
      
        Maham Riaz
       - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</title>
        <meta property="og:title" content="
      
        Maham Riaz
       - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion" />
      
    

    
  
  
  
  
  
  
  
  

  
    

    
    
    
      
      
        
        
          
          
            
                
            
            
            
            
              
            
          
          
        
      
    

    <meta name="twitter:card" content="summary" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="Maham Riaz" />

    
      <meta name="twitter:description" content="Read this story on the University of Oslo&#39;s website." />
    

    
      <meta name="twitter:image" content="/ritmo/english/people/phd-fellows/mahamr/dsc00465-3.jpg" />
    

    
    
      <meta name="twitter:url" content="/ritmo/english/people/phd-fellows/mahamr/index.html" />
    
  

    
  
  
  
  
  
  
  
  

  
    
    

    <meta property="og:url" content="/ritmo/english/people/phd-fellows/mahamr/index.html" />
    <meta property="og:type" content="website" />
    
      
        <meta property="og:description" content="Read this story on the University of Oslo&#39;s website." />
      
    

    

    
      
      
        
        
          
            
            
              
              <meta property="og:image" content="/ritmo/english/people/phd-fellows/mahamr/dsc00465-3.jpg" />
              <meta property="og:image:width" content="628" />
              <meta property="og:image:height" content="838" />

              
                

                
                
                
                  
                

                
                
                
                <meta property="og:updated_time" content="1724437923" />
              
            
          
        
      
    
  


    
  
  
  
  
  
  
  

  
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
  


    
  
  
  

  


    
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  

  

  
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  
  

  

  
    
  

  

   
     
       
     
     
       

         
         
       
     

     
   


    
        
      
    
  <meta name="description" content="Read this story on the University of Oslo&#39;s website." />
</head>

    
    
      
        
      
    

    
      <body class='www.uio.no not-for-ansatte header-context english faculty en '  id="vrtx-person">
    
  <!--stopindex-->

     
  
  
  
  
  
  

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Jump to content">
    <ul id="hidnav">
     <li><a href="#right-main">Jump to main content</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <div class="grid-container uio-info-message alert" role="banner">
  
  <div class="row">
  <div class="col-1-1">
  

  
  
    
       &nbsp;
    
  
  
  

  </div>
  </div>
  </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/english/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo/english" class="uio-host">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Language menu">
              <a href="/ritmo/" class="header-lang-no-link" lang="no">No</a>
              <span>En</span>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Menu"><span>Menu</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/english/for-employees/">For employees</a></li>
            <li class="my-studies"><a href="https://minestudier.no/en/index.html">My studies</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">Search our webpages</label>
            
            <button type="submit">Search</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <ul class="vrtx-tab-menu">
    <li class="english parent-folder">
  <a href="/ritmo/english/">Home</a>
    </li>
    <li class="about">
  <a href="/ritmo/english/about/">About the Centre</a>
    </li>
    <li class="publications">
  <a href="/ritmo/english/publications/">Publications</a>
    </li>
    <li class="vrtx-active-item people vrtx-current-item" aria-current="page">
  <a href="/ritmo/english/people/">People</a>
    </li>
    <li class="news-and-events">
  <a href="/ritmo/english/news-and-events/">News and events</a>
    </li>
    <li class="research">
  <a href="/ritmo/english/research/">Research</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/english/">Go to uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Sub menu</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-ancestor"> <a href="/ritmo/english/people/"><span>People</span></a></li>
            <li class="vrtx-parent" ><a href="/ritmo/english/people/phd-fellows/"><span>PhD Fellows</span></a>

      <ul>
          <li class="vrtx-child"><a class="vrtx-marked" aria-current="page" href="/ritmo/english/people/phd-fellows/mahamr/"><span>Maham Riaz</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="right-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Breadcrumbs">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4">
            <a href="/ritmo/english/people/">People</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-5 vrtx-breadcrumb-before-active">
            <a href="/ritmo/english/people/phd-fellows/">PhD Fellows</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-6 vrtx-breadcrumb-active">Maham Riaz
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
      <div id="vrtx-content">
        <div id="vrtx-main-content">
          <h1>
      
        Maham Riaz
      </h1>
          
      
      
      
        
  <div id="vrtx-person-position">
    <span>
        Doctoral Research Fellow
          -
        <a href="https://www.hf.uio.no/imv/english?vrtx=unit-view&amp;areacode=143695">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion (IMV)</a>
    </span>
  </div>


      
          <div id="vrtx-person-contact-info-wrapper">
              
      
        
        
        
          
          
            
            
            
            
              <img class="vrtx-person-image" src="/ritmo/english/people/phd-fellows/mahamr/dsc00465-3.jpg" alt="Maham Riaz" loading="lazy"/>
            
          
        
      
              
      <div class="vrtx-person-contactinfo">
        
        
        

          
	<span id="vrtx-person-change-language-link">
	  <a href="/ritmo/personer/stipendiater/mahamr/index.html">Norwegian<span class="offscreen-screenreader"> version of this page</span></a>
	</span>


          
            <div class="vrtx-person-contact-info-line vrtx-email"><span class="vrtx-label">Email</span>
              
                <a class="vrtx-value" href="mailto:maham.riaz@imv.uio.no">maham.riaz@imv.uio.no</a>
              
            </div>
          
          
          
            <div class="vrtx-person-contact-info-line vrtx-mobile">
              <span class="vrtx-label">Mobile phone</span>
              
              
                <span class="vrtx-value">+47 939 42 780</span>
              
            </div>
          
          
          
          
            <div class="vrtx-person-contact-info-line vrtx-username">
              <span class="vrtx-label">Username</span>
              
                  <div class="vrtx-login">
    <a href="/ritmo/english/people/phd-fellows/mahamr/index.html?vrtx=login&amp;authTarget" rel="nofollow">Log in</a>
  </div>

              
            </div>
          
          
            
              <div class="vrtx-person-visiting-address"><span class="vrtx-label">Visiting address</span>
                
                  <span class="vrtx-address-line">Forskningsv. 3A</span>
                
                  <span class="vrtx-address-line">Harald Schjelderups hus</span>
                
                  <span class="vrtx-address-line">0373 Oslo</span>
                
              </div>
            
          
          
            <div class="vrtx-person-postal-address"><span class="vrtx-label"> Postal address</span>
              
                <span class="vrtx-address-line">Postboks 1133 Blindern</span>
              
                <span class="vrtx-address-line">0318 Oslo</span>
              
            </div>
          
          
            


  <div class="vrtx-person-other-units">
    <span class="vrtx-label">Other affiliations</span>
        <span class="vrtx-value">
          <a href="https://www.hf.uio.no/english">Faculty of Humanities</a>
          (Student)
        </span>
        <span class="vrtx-value">
          <a href="/link/english">Senter for læring og utdanning</a>
          (Student)
        </span>
  </div>


          
        
      </div>
              
      <div id="vrtx-person-contact-info-extras">
        
          <a id="vrtx-press-photo" href="/ritmo/english/people/phd-fellows/mahamr/pana4512.jpg?alt=original&amp;vrtx=view-as-webpage">Press photo</a>
        
        
          <a id="vrtx-person-vcard" href="/ritmo/english/people/phd-fellows/mahamr?vrtx=vcf">Download business card</a>
        
      </div>
              <div class="vrtx-person-contact-info-wrapper-end"></div>
          </div>
          <div id="vrtx-person-main-content-wrapper">
            <div class="vrtx-article-body">
              <h2><img alt="Image may contain: Computer, Personal computer, Computer monitor, Peripheral, Computer keyboard." height="1594" src="/ritmo/english/people/phd-fellows/mahamr/dil-0227.jpeg" width="4487" loading="lazy"/><br/>
<br/>
Academic interests</h2>

<p>I am a classically trained musician with a flair for studio life. Operating giant SSL consoles, routing signals and hearing the satisfying 'click' of a microphone cable are some of the things that bring me immense joy. Studios are where all the magic happens - there is always much to learn from recording, mixing and hardware design. As a composer and performer, I always look for ways to put my research ambitions in the context of my compositions. Psychoacoustic perception, immersive audio, mixed reality, gestural music and microphone techniques are some of the areas I have invested significant time in researching and understanding.</p>

<p>I particularly enjoy working in the storytelling realm, which starts at digital animation and spreads into sound for film, games and other audiovisual media.&nbsp;<span style="color: var(--textColor); font-family: var(--mainFontStack);">I am currently </span><span style="color: var(--textColor); font-family: var(--mainFontStack);">working as a doctoral research fellow on the AMBIENT project at RITMO, where I focus on audiovisual rhythms and motion capture.</span></p>

<h2>Background</h2>

<ul>
	<li>M.Mus in Music Technology, Steinhardt School, New York University, <em>New York, NY, USA</em></li>
	<li>B.Sc (Hons) in Accounting &amp; Finance, Lahore University of Management Sciences, <em>Lahore, Pakistan</em></li>
</ul>

<h2>Awards</h2>

<ul>
	<li>Fulbright Scholarship (2019 - 2021)</li>
	<li>Graduate School Award (2019 - 2021) New York University</li>
	<li>Music Tech Academic Achievement Award (2021) New York University</li>
</ul>

            </div>
            

            
      
      
      
      
      
      
        
        
      

      
      

      
        



<style>
/* Typography for the publications list below: parent-work titles
   (journal/proceedings/book containers) and book/other publishers are
   italicized, matching common citation style; chapter-category publisher
   names are explicitly reset to upright. */

    .publisher-category-CHAPTER {
            font-style: normal;
    }

    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publications</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Scientific articles and book chapters</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Other</a></li>
        </ul>



    <div id="vrtx-publication-tab-1">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10311983" class="vrtx-external-publication">
        <div id="vrtx-publication-10311983">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10311983">
                Riaz, Maham; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Inverse and indirect mappings in embodied AI systems in everyday environments.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Computer Science.
                </span>
                            7.
            doi: <a href="https://doi.org/10.3389/fcomp.2025.1603769">10.3389/fcomp.2025.1603769</a>.
            <a href="https://hdl.handle.net/11250/5341046">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores how musicking technologies—interactive systems with musical properties—can enhance everyday public environments. We are particularly interested in investigating the effects of musical interactions in non-musical settings, such as offices, meeting rooms, and social work areas. Traditional music technologies (such as instruments) are built for goal-directed, conscious, and voluntary interactions. We propose a new perspective on embodied AI through systems that utilize indirect, inverse, unconscious, and, at times, involuntary interactions. Four different sound/music systems are examined and discussed with regard to their activity level: a reactive “birdbox,” a reactive painting, active self-playing guitars, and interactive music balls. All these systems are multimodal, containing sensors that detect various physical inputs to produce sound and light, and having varying levels of perceived agency. The paper explores differences between direct/indirect and regular/inverse embodied AI paradigms. This study demonstrates how minimalistic interactions have the potential to yield complex and engaging musicking experiences, challenging the norms of overly intricate AI implementations.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10284249" class="vrtx-external-publication">
        <div id="vrtx-publication-10284249">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10284249">
                Riaz, Maham; Guo, Jinyue; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Where to Put That Microphone? A Study of Sound Localization in Ambisonics Recordings.
                </span>
                    <span class="vrtx-parent-contributors">
                            In McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">p. 455–466.</span>
            doi: <a href="https://doi.org/10.5281/ZENODO.17497086">10.5281/ZENODO.17497086</a>.
            <a href="https://hdl.handle.net/11250/5317948">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper examines the effects of microphone placement on sound localization in first-order Ambisonics recordings. Two microphone setups were used to capture a moving audio source in a lab environment. Array A, a tetrahedral microphone, was placed in the centre of the recording space. Array B consisted of four similar tetrahedral microphones charting a rectangular perimeter surrounding the space. Motion capture data of the moving sound source shows that anglegrams calculated from the Ambisonics recordings can be effectively used for sound localization. An additional perceptual listening study with binaural renders of the audio signals showed that the centrally-placed Array A provided superior localization. However, the corner-placed Array B performed better than expected.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391317" class="vrtx-external-publication">
        <div id="vrtx-publication-2391317">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391317">
                Riaz, Maham; Guo, Jinyue; Göksülük, Bilge Serdar &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Where is That Bird? The Impact of Artificial Birdsong in Public
Indoor Environments.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Seiça, Mariana &amp; Wirfs-Brock, Jordan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    AM &#39;25: Proceedings of the 20th International Audio Mostly Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720659.</span>
                            
                <span class="vrtx-pages">p. 344–351.</span>
            doi: <a href="https://doi.org/10.1145/3771594.3771629">10.1145/3771594.3771629</a>.
            <a href="https://hdl.handle.net/11250/4977325">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores the effects of nature sounds, specifically bird sounds, on human experience and behavior in indoor public environments. We report on an intervention study where we introduced an interactive sound device to alter the soundscape. Phenomenological observations and a survey showed that participants noticed and engaged with the bird sounds primarily through causal listening; that is, they attempted to identify the sound source. Participants generally responded positively to the bird sounds, appreciating the calmness and surprise it brought to the environment. The analyses revealed that relative loudness was a key factor influencing the experience. A too-high sound level may feel unpleasant, while a too-low sound level makes it unnoticeable due to background noise. These findings highlight the importance of automatic level adjustments and considering acoustic conditions in soundscape interventions. Our study contributes to a broader discourse on sound perception, human interaction with sonic spaces, and the potential of auditory design in public indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391321" class="vrtx-external-publication">
        <div id="vrtx-publication-2391321">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391321">
                Riaz, Maham; Theodoridis, Ioannis; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        VentHackz: Exploring the Musicality of Ventilation Systems.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Cavdir, Doga &amp; Berthaut, Florent (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=031C5553-12A0-453E-B4FA-DC2B19B95BD2">The International Conference on New Interfaces for Musical Expression</a>.
                </span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.15698831">10.5281/zenodo.15698831</a>.
            <a href="https://hdl.handle.net/10852/119716">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Ventilation systems can be seen as huge examples of interfaces for musical expression, with the potential of merging sound, space, and human interaction. This paper explores conceptual similarities between ventilation systems and wind instruments and explores approaches to “hacking” ventilation systems with components that produce and modify sound. These systems enable the creation of unique sonic and visual experiences by manipulating airflow and making mechanical adjustments. Users can treat ventilation systems as musical interfaces by altering shape, material, and texture or augmenting vents. We call for heightened attention to the sound-making properties of ventilation systems and call for action (#VentHackz) to playfully improve the soundscapes of our indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2349747" class="vrtx-external-publication">
        <div id="vrtx-publication-2349747">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2349747">
                Riaz, Maham; Guo, Jinyue &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Spatial Audio Recordings from Commercially Available 360-degree Video Cameras.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Brooks, Anthony L.; Banakou, Domna &amp; Ceperkovic, Slavica (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 13th EAI International Conference on ArtsIT, Interactivity and Game Creation, ArtsIT 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=AD8FEF33-C155-4915-A7BF-A1BE33DDAC4D">Springer</a>.
                </span>
                <span class="vrtx-issn">ISSN 9783031972546.</span>
                            
                <span class="vrtx-pages">p. 160–172.</span>
            doi: <a href="https://doi.org/10.1007/978-3-031-97254-6_12">10.1007/978-3-031-97254-6_12</a>.
            <a href="https://hdl.handle.net/11250/3259963">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper investigates the spatial audio recording capabilities of various commercially available 360-degree cameras (GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S). A dedicated ambisonics audio recorder (Zoom H3VR) was used for comparison. Six action sequences were performed around the recording setup, including impulsive and continuous vocal and non-vocal stimuli. The audio streams were extracted from the videos and compared using spectrograms and anglegrams. The anglegrams show adequate localization in ambisonic recordings from the GoPro MAX and Zoom H3VR. All cameras feature undocumented noise reduction and audio enhancement algorithms, use different types of audio compression, and have limited audio export options. This makes it challenging to use the spatial audio data reliably for research purposes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2292179" class="vrtx-external-publication">
        <div id="vrtx-publication-2292179">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2292179">
                Guo, Jinyue; Riaz, Maham &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Four 360-Degree Cameras for Spatial Video Recording and Analysis,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISSN 9789893520758.</span>
                            
            
            <a href="https://hdl.handle.net/10852/113954">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper reports on a desktop investigation and a lab experiment comparing the video recording capabilities of four commercially available 360-degree cameras: GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S. The four cameras all use different recording formats and settings and have varying video quality and software support. This makes it difficult to conduct analyses and compare between devices. We have implemented new functions in the Musical Gestures Toolbox (MGT) for reading and merging files from the different platforms. Using the capabilities of FFmpeg, we have also made a new function for converting between different 360-degree video projections and formats. This allows (music) researchers to exploit 360-degree video recordings using regular video-based analysis pipelines.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200393" class="vrtx-external-publication">
        <div id="vrtx-publication-2200393">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200393">
                Riaz, Maham
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        An Investigation of Supervised Learning in Music Mood Classification for Audio and MIDI.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Andreopoulou, Areti &amp; Boren, Braxton (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Engineering Society 155th Convention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=14B11A7E-F376-483D-8077-B3EFFB3CBDD2">Audio Engineering Society, Inc.</a>.
                </span>
                <span class="vrtx-issn">ISSN 9781713894667.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3664099">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This study aims to use supervised learning – specifically, support vector machines – as a tool for a music mood classification task. Four audio and MIDI datasets, each containing over four hundred files, were composed for use in the training and testing processes. Mood classes were formed according to the valence-arousal plane, resulting in the following: happy, sad, relaxed, and tense. Additional runs were also conducted with the linear discriminant analysis, a dimensionality reduction technique commonly used to better the performance of the classifier. The relevant audio and MIDI features were carefully selected for extraction. MIDI datasets for the same music generated better classification results than corresponding audio datasets. Furthermore, when music is composed with each mood associated with a particular key instead of mixed keys, the classification accuracy is higher.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200397" class="vrtx-external-publication">
        <div id="vrtx-publication-2200397">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200397">
                Riaz, Maham &amp; Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Using SuperCollider with OSC Commands for Spatial Audio Control in a Multi-Speaker Setup.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Andreopoulou, Areti &amp; Boren, Braxton (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Engineering Society 155th Convention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=14B11A7E-F376-483D-8077-B3EFFB3CBDD2">Audio Engineering Society, Inc.</a>.
                </span>
                <span class="vrtx-issn">ISSN 9781713894667.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3399808">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">With the ever-increasing prevalence of technology, its application in various music-related processes, such as music composition and performance, has become increasingly prominent. One fascinating area where technology finds utility is in music performance, offering opportunities for extensive sound exploration and manipulation. In this paper, we introduce an approach utilizing SuperCollider and Open Sound Control (OSC) commands in a multi-speaker setup, enabling spatial audio control for a truly interactive audio spatialization experience. We delve into the musicological dimensions of these distinct methods, examining their integration within a live performance setting to uncover their artistic and expressive potential. By merging technology and musicology, our research aims to unlock new avenues for immersive and captivating musical experiences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200042" class="vrtx-external-publication">
        <div id="vrtx-publication-2200042">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200042">
                Riaz, Maham; Upham, Finn; Burnim, Kayla; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing inertial motion sensors for capturing human micromotion,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2023.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISSN 9789152773727.</span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.8316051">10.5281/zenodo.8316051</a>.
            <a href="https://hdl.handle.net/10852/106232">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents a study of the noise level of accelerometer data from a mobile phone compared to three commercially available IMU-based devices (AX3, Equivital, and Movesense) and a marker-based infrared motion capture system (Qualisys). The sensors are compared in static positions and for measuring human micromotion, with larger motion sequences as reference. The measurements show that all but one of the IMU-based devices capture motion with an accuracy and precision that is far below human micromotion. However, their data and representations differ, so care should be taken when comparing data between devices.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200362" class="vrtx-external-publication">
        <div id="vrtx-publication-2200362">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200362">
                Riaz, Maham; Martinez, Diana Hernandez &amp; Roginska, Agnieszka
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Immersive Soundfield Microphone Techniques: An Experimental Approach to Recording Music,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Engineering Society 153rd Convention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=14B11A7E-F376-483D-8077-B3EFFB3CBDD2">Audio Engineering Society, Inc.</a>.
                </span>
                <span class="vrtx-issn">ISSN 9781942220404.</span>
                            
            
            <a href="https://hdl.handle.net/11250/5208949">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Multi-capsule microphone techniques are used in recording the multi-dimensionality of musical performances. This paper evaluates two such techniques: one developed with the 6-capsule Double Mid-Side-Z (DMS-Z) array, and the other with the 32-capsule spherical array. The recordings underwent a similar process of decoding using the 3DCC method, in which each set of arrays was mapped onto 8 directional signals on the horizontal plane. From the performance aspect, the music ensemble consisted of four speakers, a subwoofer and two singers moving within the recording space. Two mixes were produced: Mix A with a combination of the 3DCC-decoded DMS-Z array, spot microphones and Hamasaki Square microphones; and Mix B with the 3DCC-decoded spherical array, spot microphones and Hamasaki Square microphones. These immersive mixes were then evaluated for various perceptual characteristics. Detailed subjective testing is yet to be conducted, but the results so far suggest some perceptual differences in the acoustics and perceived movement of the singers. This is ongoing research that aims to understand whether using a similar audio decoding process on two different multi-array microphone configurations can sufficiently eliminate the perceptual differences between them.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200358" class="vrtx-external-publication">
        <div id="vrtx-publication-2200358">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200358">
                Riaz, Maham &amp; Roginska, Agnieszka
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Psychoacoustic Perception of Distance in Monophonic and Binaural Music,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Engineering Society 153rd Convention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=14B11A7E-F376-483D-8077-B3EFFB3CBDD2">Audio Engineering Society, Inc.</a>.
                </span>
                <span class="vrtx-issn">ISSN 9781942220404.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3514758">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Music plays an important role in immersive environments. With the rise in immersive technology, scientists in the field have been researching how music is perceived in extended reality (XR), in an attempt to improve the authenticity of audiovisual experiences. The auditory perception of distance is an important issue in 3D virtual environments but has been considered an obscure area. The study aims to compare monophonic and binaural signals for more accurate localization, over different types of music audio sources.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1470780">View all works in NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-2391318" class="vrtx-external-publication">
        <div id="vrtx-publication-2391318">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391318">
                Riaz, Maham
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Where is That Bird? The Impact of Artificial Birdsong in Public
Indoor Environments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4262079">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores the effects of nature sounds, specifically bird sounds, on human experience and behavior in indoor public environments. We report on an intervention study where we introduced an interactive sound device to alter the soundscape. Phenomenological observations and a survey showed that participants noticed and engaged with the bird sounds primarily through causal listening; that is, they attempted to identify the sound source. Participants generally responded positively to the bird sounds, appreciating the calmness and surprise it brought to the environment. The analyses revealed that relative loudness was a key factor influencing the experience. A too-high sound level may feel unpleasant, while a too-low sound level makes it unnoticeable due to background noise. These findings highlight the importance of automatic level adjustments and considering acoustic conditions in soundscape interventions. Our study contributes to a broader discourse on sound perception, human interaction with sonic spaces, and the potential of auditory design in public indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391322" class="vrtx-external-publication">
        <div id="vrtx-publication-2391322">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391322">
                Riaz, Maham
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        VentHackz: Exploring the Musicality of Ventilation Systems.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4318091">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Ventilation systems can be seen as huge examples of interfaces for musical expression, with the potential of merging sound, space, and human interaction. This paper explores conceptual similarities between ventilation systems and wind instruments and explores approaches to &quot;hacking&quot; ventilation systems with components that produce and modify sound. These systems enable the creation of unique sonic and visual experiences by manipulating airflow and making mechanical adjustments. Users can treat ventilation systems as musical interfaces by altering shape, material, and texture or augmenting vents. We call for heightened attention to the sound-making properties of ventilation systems and call for action (#VentHackz) to playfully improve the soundscapes of our indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2387009" class="vrtx-external-publication">
        <div id="vrtx-publication-2387009">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2387009">
                Riaz, Maham
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Art and Science of Immersive Sound Design in Games - What&#39;s the Secret?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4671577">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In modern games, sound design is far more than mere background noise—it conveys a story and shapes entire worlds. We will explore how gamification principles—interaction, feedback, progression, challenge, exploration, and motivation—integrate with sound design techniques such as spatial audio, adaptive mixing, and procedural audio to create responsive audio environments. Practical aspects of implementing game audio will be discussed within Unity (and Wwise).</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2349756" class="vrtx-external-publication">
        <div id="vrtx-publication-2349756">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2349756">
                Riaz, Maham
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Spatial Audio Recordings from Commercially Available 360-degree Video Cameras.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4399466">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper investigates the spatial audio recording capabilities of various commercially available 360-degree cameras (GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S). A dedicated ambisonics audio recorder (Zoom H3VR) was used for comparison. Six action sequences were performed around the recording setup, including impulsive and continuous vocal and non-vocal stimuli. The audio streams were extracted from the videos and compared using spectrograms and anglegrams. The anglegrams show adequate localization in ambisonic recordings from the GoPro MAX and Zoom H3VR. All cameras feature undocumented noise reduction and audio enhancement algorithms, use different types of audio compression, and have limited audio export options. This makes it challenging to use the spatial audio data reliably for research purposes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2349762" class="vrtx-external-publication">
        <div id="vrtx-publication-2349762">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2349762">
                Riaz, Maham &amp; Theodoridis, Ioannis
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ventilation hacking.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4350817">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We examine innovative approaches to mitigate the issue of unwanted ventilation noise, transforming it from a disruptive element into a source of ambient or musical sound. We propose a range of solutions, from mechanical adjustments to acoustic treatments and digital interventions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2370702" class="vrtx-external-publication">
        <div id="vrtx-publication-2370702">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2370702">
                Jensenius, Alexander Refsum; Riaz, Maham; Oldfield, Thomas L &amp; Juarez, Karenina
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO-studenter presenterer nye installasjoner.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4494725">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Studenter tilknyttet RITMO stiller ut prosjektene sine på Popsenteret: en interaktiv symaskin fra 1911, et lyttende og snakkende speil, og et interaktivt maleri. Hvordan kan slike objekter gi musikalske opplevelser?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200281" class="vrtx-external-publication">
        <div id="vrtx-publication-2200281">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200281">
                Riaz, Maham
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Design in Unity: Immersive Audio for Virtual Reality Storytelling.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4338498">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Research talk on sound design for games and immersive environments. The Unity game engine is used for environmental modeling. The Oculus Spatializer plugin provides control over binaural spatialization with native head related transfer functions (HRTF). Game scenes included C# scripts, which accounted for intermittent emitters (randomly triggered sounds of nature, critters and birds), crossfades, occlusion and raycasting. In the mixing stage, mixer groups, mixer snapshots, snapshot triggers, SFX reverb sends, and low/high-pass filters were some of the tools demonstrated.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200163" class="vrtx-external-publication">
        <div id="vrtx-publication-2200163">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200163">
                Riaz, Maham
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        An Investigation of Supervised Learning in Music Mood Classification for Audio and MIDI.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4798154">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This study aims to use supervised learning – specifically, support vector machines – as a tool for a music mood classification task. Four audio and MIDI datasets, each containing over four hundred files, were composed for use in the training and testing processes. Mood classes were formed according to the valence-arousal plane, resulting in the following: happy, sad, relaxed, and tense. Additional runs were also conducted with the linear discriminant analysis, a dimensionality reduction technique commonly used to better the performance of the classifier. The relevant audio and MIDI features were carefully selected for extraction. MIDI datasets for the same music generated better classification results than corresponding audio datasets. Furthermore, when music is composed with each mood associated with a particular key instead of mixed keys, the classification accuracy is higher.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200050" class="vrtx-external-publication">
        <div id="vrtx-publication-2200050">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200050">
                Riaz, Maham; Upham, Finn; Burnim, Kayla; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing inertial motion sensors for capturing human micromotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5122145">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents a study of the noise level of accelerometer data from a mobile phone compared to three commercially available IMU-based devices (AX3, Equivital, and Movesense) and a marker-based infrared motion capture system (Qualisys). The sensors are compared in static positions and for measuring human micromotion, with larger motion sequences as reference. The measurements show that all but one of the IMU-based devices capture motion with an accuracy and precision that is far below human micromotion. However, their data and representations differ, so care should be taken when comparing data between devices.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200082" class="vrtx-external-publication">
        <div id="vrtx-publication-2200082">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200082">
                Riaz, Maham
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Using SuperCollider with OSC Commands for Spatial Audio Control in a Multi-Speaker Setup.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3615973">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">With the ever-increasing prevalence of technology, its application in various music-related processes, such as music composition and performance, has become increasingly prominent. One fascinating area where technology finds utility is in music performance, offering opportunities for extensive sound exploration and manipulation. In this paper, we introduce an approach utilizing SuperCollider and Open Sound Control (OSC) commands in a multi-speaker setup, enabling spatial audio control for a truly interactive audio spatialization experience. We delve into the musicological dimensions of these distinct methods, examining their integration within a live performance setting to uncover their artistic and expressive potential. By merging technology and musicology, our research aims to unlock new avenues for immersive and captivating musical experiences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2065434" class="vrtx-external-publication">
        <div id="vrtx-publication-2065434">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2065434">
                Riaz, Maham
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Psychoacoustic Perception of Distance in Monophonic and Binaural Music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4462192">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Music plays an important role in immersive environments. With the rise in immersive technology, scientists in the field have been researching how music is perceived in extended reality (XR), in an attempt to improve the authenticity of audiovisual experiences. The auditory perception of distance is an important issue in 3D virtual environments but has been considered an obscure area. The study aims to compare monophonic and binaural signals for more accurate localization, over different types of music audio sources.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2065435" class="vrtx-external-publication">
        <div id="vrtx-publication-2065435">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2065435">
                Riaz, Maham; Martinez, Diana Hernandez &amp; Roginska, Agnieszka
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Immersive Soundfield Microphone Techniques: An Experimental Approach to Recording Music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5242671">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Multi-capsule microphone techniques are used in recording the multi-dimensionality of musical performances. This paper evaluates two such techniques: one developed with the 6-capsule Double Mid-Side-Z (DMS-Z) array, and the other with the 32-capsule spherical array. The recordings underwent a similar process of decoding using the 3DCC method, in which each set of arrays was mapped onto 8 directional signals on the horizontal plane. From the performance aspect, the music ensemble consisted of four speakers, a subwoofer and two singers moving within the recording space. Two mixes were produced: Mix A with a combination of the 3DCC-decoded DMS-Z array, spot microphones and Hamasaki Square microphones; and Mix B with the 3DCC-decoded spherical array, spot microphones and Hamasaki Square microphones. These immersive mixes were then evaluated for various perceptual characteristics. Detailed subjective testing is yet to be conducted, but the results so far suggest some perceptual differences in the acoustics and perceived movement of the singers. This is ongoing research that aims to understand whether using a similar audio decoding process on two different multi-array microphone configurations can sufficiently eliminate the perceptual differences between them.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1470780">View all works in NVA</a></p>
    </div>

      </div>
    </div>



      
            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Published</span>
        <span class="published-date">Sep. 5, 2022 10:32 AM </span>
        
        - <span class="last-modified-date">Last modified</span>
        <span class="last-modified-date">Aug. 23, 2024 8:32 PM</span>
        
        </div>
      
          </div>
        </div>
        <div id="vrtx-additional-content">
          
      
          

<div class="vrtx-projects vrtx-frontpage-box">
  <h2>Projects</h2>

  <div class="vrtx-box-content">
  <ul class="only-links">
      <li><a href="/ritmo/english/projects/Bodies-in-Concert/index.html">Bodies in Concert</a></li>
      <li><a href="/ritmo/english/projects/ambient/index.html">Bodily Entrainment to Audiovisual Rhythms (AMBIENT)</a></li>
      <li><a href="/ritmo/english/projects/musical-hci/index.html">Musical human-computer interaction</a></li>
      <li><a href="/ritmo/english/projects/self-playing-guitars/index.html">Self-playing Guitars</a></li>
  </ul>

  </div>
</div>



          
          
      
      
        </div>
      </div>
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo/english">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Contact information</h2>
   <p><a href="/ritmo/english/about/">Contact us</a><br>
   <a href="/english/about/getting-around/areas/gaustad/ga09/">Find us</a></p>
</div>
<div>
   <h2>About the website</h2>
   <p><a href="/english/about/regulations/privacy-declarations/privacy-policy-web.html">Cookies</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Accessibility statement (in Norwegian only)</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Responsible for this page</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredaktør</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/english/people/phd-fellows/mahamr/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Log in
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/english/" title="Go to uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

<!--a4d1bc0e1742c08b--><script>
(function () {
  // Baidu link-submit beacon: injects push.js ahead of the first <script>
  // on the page so Baidu can discover/index this URL.
  // Always load over HTTPS: the old http:// fallback served an
  // unauthenticated script and is blocked as mixed content on secure
  // pages; zz.bdstatic.com serves HTTPS regardless of page protocol.
  var bp = document.createElement('script');
  bp.src = 'https://zz.bdstatic.com/linksubmit/push.js';
  var s = document.getElementsByTagName('script')[0];
  s.parentNode.insertBefore(bp, s);
})();
</script><!--/a4d1bc0e1742c08b--></body>
</html>
