<!DOCTYPE html>
<html lang="no">
  <head>
    <!-- NOTE(review): removed injected SEO-spam markup that was prepended here:
         an og:image pointing at an external spam domain (wap.y666.net), a
         Baidu-specific "no-siteapp" meta, and a window.onerror handler that
         returned true and thereby silently suppressed ALL JavaScript errors.
         The legitimate mobile-hint metas and the V_PATH global are kept. -->
    <meta http-equiv="Cache-Control" content="no-transform">
    <meta name="MobileOptimized" content="width">
    <meta name="HandheldFriendly" content="true">
    <script>var V_PATH="/";</script>
    
    <meta charset="utf-8" >
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1" />

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <!-- NOTE(review): stripped the spam keyword suffix that had been
             appended to the <title>, and repaired mojibake ("p?" -> "på"). -->
        <title>
          Kroppslig synkronisering til audiovisuelle rytmer
          
            (AMBIENT)
          
         - RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse</title>
        <meta property="og:title" content="
          Kroppslig synkronisering til audiovisuelle rytmer
          
            (AMBIENT)
          
         - RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse" />
      
    

    
  
  
  
  
  
  
  
  

  
    

    
    
    
      
      
        
        
          
          
            
                
            
            
              
            
            
            
              
            
          
          
        
      
    

    <meta name="twitter:card" content="summary_large_image" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="Kroppslig synkronisering til audiovisuelle rytmer (AMBIENT)" />

    
      <meta name="twitter:description" content="Hvordan påvirker rytmer mennesker?" />
    

    
      <meta name="twitter:image" content="/ritmo/english/projects/ambient/annie-spratt-unsplash.jpg" />
    

    
    
      <meta name="twitter:url" content="/ritmo/prosjekter/ambient/index.html" />
    
  

    
  
  
  
  
  
  
  
  

  
    
    

    <meta property="og:url" content="/ritmo/prosjekter/ambient/index.html" />
    <meta property="og:type" content="website" />
    
      <meta property="og:description" content="Hvordan påvirker rytmer mennesker?" />
    

    

    
      
      
        
        
          
            
            
              
              <meta property="og:image" content="/ritmo/english/projects/ambient/annie-spratt-unsplash.jpg" />
              <meta property="og:image:width" content="1000" />
              <meta property="og:image:height" content="563" />

              
                

                
                
                
                  
                

                
                
                
                <meta property="og:updated_time" content="1774348828" />
              
            
          
        
      
    
  


    
  
  
  
  
  
  
  

  
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
  


    
  
  
  

  


    
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  

  

  
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  
  

  

  
    
  

  

   
     
       
     
     
       

         
         
       
     

     
   


    
        
        <meta property="og:title" content="Kroppslig synkronisering til audiovisuelle rytmer
           
             (AMBIENT)
           "/>
      
    
  <!-- NOTE(review): removed injected spam <meta name="keywords"> and
       <meta name="description"> (betting-site keywords), the injected
       /ceng.js cloaking script, and a duplicate viewport meta that set
       user-scalable=no / maximum-scale=1 — blocking pinch-zoom is a WCAG
       failure, and the legitimate viewport meta is already declared at the
       top of <head> (id="viewport"). -->
</head>

    
    
      
        
      
    

    
      <body class='www.uio.no not-for-ansatte header-context ritmo faculty no total-main '  id="vrtx-structured-project-two">
    
  <!--stopindex-->

     
  
  
  
  
  
  

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Hopp til innhold">
    <ul id="hidnav">
     <li><a href="#total-main">Hopp til hovedinnhold</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <!-- UiO info-message banner (empty placeholder content on this page).
         NOTE(review): removed a stray "&nbsp;" entity from inside the class
         attribute — it produced a bogus non-breaking-space class token. -->
    <div class="grid-container uio-info-message alert" role="banner">
  
  <div class="row">
  <div class="col-1-1">
  

  
  
    
       &nbsp;
    
  
  
  

  </div>
  </div>
  </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo" class="uio-host">RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Språkmeny">
              <span>No</span>
              <a href="/ritmo/english/" class="header-lang-en-link" lang="en">En</a>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Meny"><span>Meny</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/for-ansatte/">For ansatte</a></li>
            <li class="my-studies"><a href="https://minestudier.no/nb/index.html">Mine studier</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">Søk i nettsidene til UiO</label>
            
            <button type="submit">Søk</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <!-- Main section menu.
                       NOTE(review): three link labels had been overwritten by
                       injected spam keywords; labels restored from the target
                       folder names (/ritmo/, /ritmo/aktuelt/, /ritmo/forskning/)
                       — confirm against the site's canonical menu. -->
                  <ul class="vrtx-tab-menu">
    <li class="vrtx-active-item ritmo parent-folder vrtx-current-item" aria-current="page">
  <a href="/ritmo/">RITMO</a>
    </li>
    <li class="om">
  <a href="/ritmo/om/">Om senteret</a>
    </li>
    <li class="personer">
  <a href="/ritmo/personer/">Personer</a>
    </li>
    <li class="aktuelt">
  <a href="/ritmo/aktuelt/">Aktuelt</a>
    </li>
    <li class="forskning">
  <a href="/ritmo/forskning/">Forskning</a>
    </li>
    <li class="publikasjoner">
  <a href="/ritmo/publikasjoner/">Publikasjoner</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/">Gå til uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" class="hidden" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Undermeny</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-parent" ><a href="/ritmo/prosjekter/"><span>Prosjekter</span></a>

      <ul>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/ferdige-prosjekter/"><span>Avsluttede prosjekter</span></a></li>
          <li class="vrtx-child"><a class="vrtx-marked" aria-current="page" href="/ritmo/prosjekter/ambient/"><span>AMBIENT</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/audiopred/"><span>AudioPred</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/bioRITMO/"><span>bioRITMO</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/bodies-in-concert/"><span>Bodies in Concert</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/djembedance/"><span>DjembeDance – Multimodal rytme i musikk og dans fra Vest-Afrika</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/dr-squiggles/"><span>Dr. Squiggles</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/FUNCTUMUS/"><span>FUNCTUMUS: The Functional Turn in Music</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/JND%20Groove/"><span>JNDgroove</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/mirage/"><span>MIRAGE</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/modellering-roboter/"><span>Modellering og roboter</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/musiclab/"><span>MusicLab</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/musikalske-frysninger/"><span>Musikalske frysninger</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/musical-hci/"><span>Musikalsk menneske-maskin-interaksjon</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/musikalsk-tid-form/"><span>Musikalsk tid og form</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/nordicsmc/"><span>NordicSMC</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/pirc/"><span>PIRC</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/professor-plucky/"><span>Professor Plucky</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/ritpart/"><span>RITPART</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/self-playing-guitars/"><span>Selvspillende gitarer</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/synkronisering-sosial-tilknytning-behag/"><span>Synkronisering, sosial tilknytning og behag</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/synkronisert-robotikk/"><span>Synkronisert robotikk</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/time/"><span>TIME - Musikk og mikrorytmikk</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/ytelse-bevissthet-musikkopplevelsen/"><span>Ytelse og bevissthet i musikkopplevelsen</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="total-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Brødsmulesti">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-3 vrtx-breadcrumb-before-active">
            <a href="/ritmo/prosjekter/">Prosjekter</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4 vrtx-breadcrumb-active">AMBIENT
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
        <div id="vrtx-content">
          
          <div id="vrtx-main-content">
            
            <h1>
              
                <span class="vrtx-short-title">
                  AMBIENT
                </span>
              
              <span class="vrtx-title">Kroppslig synkronisering til audiovisuelle rytmer</span>
            </h1>
            
            
      
        <dl class="project-status-bar">
          
        
        
          
        
          
        
        
          
        
        
          <dt class="duration-header">Varighet</dt>
          <dd class="duration-value">01.12.2021–31.12.2026</dd>
        
        </dl>
      
            
      
        <div class="vrtx-introduction"><p>Hvordan påvirker rytmer mennesker?</p></div>
      
            
              
      
      
      
      
      
        
      
      
        
      
      
        <div class="vrtx-middle-image">
          <div class="vrtx-middle-image-wrapper">
            <img src="/ritmo/english/projects/ambient/annie-spratt-unsplash.jpg" alt="En person som sitter foran en PC-skjerm på et kontor. " loading="lazy"/>
          </div>
          
            <div class="vrtx-imagetext">
              <div class="vrtx-imagedescription"><p>AMBIENT vil studere hvordan rytmer i «bakgrunnen» i innendørsmiljøer, som et blinkende lys eller en tikkende klokke, påvirker folk.</p></div>
              
                <span class="vrtx-photo">
                  <span class="vrtx-photo-prefix">Foto: </span>Annie Spratt/Unsplash
                </span>
              
            </div>
          
        </div>
      
            
            
      
      
        <div class="vrtx-person-list-contact-persons vrtx-frontpage-box">
        <h2>Kontaktpersoner</h2>
          <div class="vrtx-box-content">
            <ul>
                  
                      
                      
                      
                      
                      
                      <li>
                        
                          <div class="vrtx-contact-person-picture">
                              <img src="/ritmo/personer/senterledelse/alexanje/alexander_web.jpg" alt="Alexander Refsum Jensenius" loading="lazy"/>
                          </div>
                        
                        <div class="vrtx-contact-person-info">
                          
                              <a class="vrtx-contact-person-name" href="/ritmo/personer/senterledelse/alexanje/index.html">Alexander Refsum Jensenius</a>
                          
                          
                            <span class="vrtx-contact-person-affiliation">Universitetet i Oslo</span>
                          
                          
                            <span class="vrtx-contact-person-role">Prosjektleder</span>
                          
                        </div>
                      </li>
                  
                  </ul>
              </div>
            </div>
      
            <div class="navigation-links navigation-links-three-columns">
              

            </div>

            
            
            
            
            
            
            <div class="vrtx-article-body">
              <h2>Om prosjektet</h2><p>AMBIENT skal undersøke hvordan rytmer påvirker mennesker. En tikkende klokke er en lydlig rytme. Et blinkende lys er en visuell rytme. Slike lydlige og visuelle rytmer blandes til audiovisuelle rytmer i et rom.</p><p>Vår hypotese er at slike audiovisuelle rytmer påvirker folk, bevisst eller ubevisst. Dette vil først bli studert gjennom månedslange observasjonsstudier av folk som jobber alene på kontor. Vi skal måle de audiovisuelle rytmene og sammenligne disse med både kvalitative og kvantitative data fra deltagerne.</p><p>Deretter skal vi studere hvordan folk synkroniserer gjennom nettbaserte verktøy. Målet er å utvikle en helhetlig teori om audiovisuelle og spatiotemporale rytmer. Til slutt skal vi utforske interaktive systemer for å gjenskape audiovisuelle rytmer i et nettbasert klasserom.</p><div class="vrtx-media-player"><vrtx-component-block>

  <!-- initialising default video dimension values. -->





    <div id="mediaspiller-15094557547100819" class="vrtx-media-player">
      
    </div>
    <div class="vrtx-media-button-holder">
    </div>



</vrtx-component-block></div>
            </div>
            <div class="participants">
              
<div class="vrtx-person-list-participants vrtx-frontpage-box">
    <h2>Deltakere</h2>

  <div class="vrtx-box-content">
      <ul>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Bilde av Alexander Refsum Jensenius" src="/ritmo/personer/senterledelse/alexanje/arjensenius_2025_2_150px.png" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="/ritmo/personer/senterledelse/alexanje/index.html" class="vrtx-participant-name">Alexander Refsum Jensenius</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Bilge Serdar Göksülük</span>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Bilde av Maham Riaz" src="/ritmo/personer/stipendiater/mahamr/dsc00465-3.jpg" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="/ritmo/personer/stipendiater/mahamr/index.html" class="vrtx-participant-name">Maham Riaz</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Bilde av Arthur Jinyue Guo" src="/ritmo/personer/stipendiater/jinyueg/subject.png" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="/ritmo/personer/stipendiater/jinyueg/index.html" class="vrtx-participant-name">Arthur Jinyue Guo</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Karenina Natalia Juarez</span>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Thomas Eric Oldfield</span>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Joachim Perceval Laurent Erwin Poutaraud</span>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Nino Jakeli</span>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Henrik Haraldsen Sveen</span>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Eyyüb Güven</span>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Ferdinand Schwarz</span>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Juliet Merchant</span>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Alicja Terelak</span>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
      </ul>
  </div>
</div>

            </div>
            
      
      
      
      <div class="related-groups">
        <div class="vrtx-groups">
          
          
            <div class="vrtx-related-groups">  <div class="vrtx-groups-related-to-project vrtx-frontpage-box">
    <h2>Involverte forskergrupper</h2>
    <div class="vrtx-box-content">
      <ul class="only-links">
            <li><a href="/ritmo/forskning/laboratorier/fourms/index.html">fourMs bevegelseslab</a></li>
      </ul>
    </div>
  </div>
</div>
          
          
        </div>
      </div>
      
            
            
            
	  
	  

    
    

    
    

	  
      



<style>

    .publisher-category-CHAPTER {
            font-style: normal;
    }

    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publikasjoner</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Vitenskapelige artikler og bokkapitler</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Bøker</a></li>
            <li><a href="#vrtx-publication-tab-3" name="vrtx-publication-tab-3">Andre</a></li>
        </ul>



    <div id="vrtx-publication-tab-1">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10420743" class="vrtx-external-publication">
        <div id="vrtx-publication-10420743">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10420743">
                Guo, Jinyue; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Investigating Auditory–Visual Perception Using Multi-Modal Neural Networks with the SoundActions Dataset.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Transactions of the International Society for Music Information Retrieval.
                </span>
                            9(1),
                <span class="vrtx-pages">s. 85–85.</span>
            doi: <a href="https://doi.org/10.5334/tismir.223">10.5334/tismir.223</a>.
            <a href="https://hdl.handle.net/11250/5486133">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Musicologists, psychologists, and computer scientists study relationships between auditory and visual stimuli from very different perspectives and using various terminologies and methodologies. This article aims to bridge the gap between phenomenological sound theory, auditory–visual theory, and audio–video processing and machine learning. We introduce the SoundActions dataset, a collection of 365 audio–video recordings of (primarily) short sound actions. Each recording has been human?labeled and annotated according to Pierre Schaeffer’s theory of reduced listening, which describes the property of the sound itself (e.g., ‘an impulsive sound’) instead of the source (e.g., ‘a bird sound’). With these reduced?type labels in the audio–video dataset, we conducted two experiments: (1) fine?tuning the latest audio–video transformer model on the reduced?type labels in the SoundActions dataset, proving that the model can recognize reduced?type labels, and observing that the modality?imbalance phenomenon is similar to the added value theory by Michel Chion and (2) proposing the Ensemble of Perception Mode Adapters method inspired by Pierre Schaeffer’s three listening modes, improving the audio–video model also on reduced?type tasks.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10311983" class="vrtx-external-publication">
        <div id="vrtx-publication-10311983">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10311983">
                Riaz, Maham; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Inverse and indirect mappings in embodied AI systems in everyday environments.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Computer Science.
                </span>
                            7.
            doi: <a href="https://doi.org/10.3389/fcomp.2025.1603769">10.3389/fcomp.2025.1603769</a>.
            <a href="https://hdl.handle.net/11250/5341046">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores how musicking technologies—interactive systems with musical properties—can enhance everyday public environments. We are particularly interested in investigating the effects of musical interactions in non-musical settings, such as offices, meeting rooms, and social work areas. Traditional music technologies (such as instruments) are built for goal-directed, conscious, and voluntary interactions. We propose a new perspective on embodied AI through systems that utilize indirect, inverse, unconscious, and, at times, involuntary interactions. Four different sound/music systems are examined and discussed with regard to their activity level: a reactive “birdbox,” a reactive painting, active self-playing guitars, and interactive music balls. All these systems are multimodal, containing sensors that detect various physical inputs to produce sound and light, and having varying levels of perceived agency. The paper explores differences between direct/indirect and regular/inverse embodied AI paradigms. This study demonstrates how minimalistic interactions have the potential to yield complex and engaging musicking experiences, challenging the norms of overly intricate AI implementations.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10299007" class="vrtx-external-publication">
        <div id="vrtx-publication-10299007">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10299007">
                Christodoulou, Anna-Maria; Arnim, Hugh Alexander von &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Supporting Narrative Comprehension in Programmatic Music through Music and Light.
                </span>
                    <span class="vrtx-parent-contributors">
                            I McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">s. 447–454.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17509282">10.5281/zenodo.17509282</a>.
            <a href="https://hdl.handle.net/11250/5330619">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Programmatic music, such as Tchaikovsky’s Overture Romeo and Juliet, relies on the audience’s ability to associate musical motifs with narrative elements. This is a demanding task for less experienced listeners, particularly when cues are subtle, such as those conveyed through timbre. This paper explores how dynamic stage lighting, driven by physiological signals, can enhance narrative comprehension in orchestral performance. Using the LightHearted interactive lighting system, different characters of the Overture were mapped to distinct colored lights, whose intensities were dynamically modulated in real time by the heart rates of the conductor and selected musicians. This integration aimed to convey subtle narrative cues to the audience in real time. Audience feedback suggests that this approach not only clarifies musical narratives but also enhances the overall experience.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10284249" class="vrtx-external-publication">
        <div id="vrtx-publication-10284249">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10284249">
                Riaz, Maham; Guo, Jinyue; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Where to Put That Microphone? A Study of Sound Localization in Ambisonics Recordings.
                </span>
                    <span class="vrtx-parent-contributors">
                            I McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">s. 455–466.</span>
            doi: <a href="https://doi.org/10.5281/ZENODO.17497086">10.5281/ZENODO.17497086</a>.
            <a href="https://hdl.handle.net/11250/5317948">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper examines the effects of microphone placement on sound localization in first-order Ambisonics recordings. Two microphone setups were used to capture a moving audio source in a lab environment. Array A, a tetrahedral microphone, was placed in the centre of the recording space. Array B consisted of four similar tetrahedral microphones charting a rectangular perimeter surrounding the space. Motion capture data of the moving sound source shows that anglegrams calculated from the Ambisonics recordings can be effectively used for sound localization. An additional perceptual listening study with binaural renders of the audio signals showed that the centrally-placed Array A provided superior localization. However, the corner-placed Array B performed better than expected.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10284183" class="vrtx-external-publication">
        <div id="vrtx-publication-10284183">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10284183">
                Guo, Jinyue; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cross-modal Analysis of Spatial-Temporal Auditory Stimuli and Human Micromotion when Standing Still in Indoor Environments.
                </span>
                    <span class="vrtx-parent-contributors">
                            I McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">s. 871–882.</span>
            doi: <a href="https://doi.org/10.5281/ZENODO.17502603">10.5281/ZENODO.17502603</a>.
            <a href="https://hdl.handle.net/11250/5317903">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper examines how a soundscape influences human stillness. We are particularly interested in how spatial and temporal features of a soundscape influence human micromotion and swaying patterns. The analysis is based on 345 Ambisonics audio recordings of different indoor environments and corresponding accelerometer data captured at the chest of a person standing still for ten minutes. We calculated the temporal and spatial correlation between the person&#39;s quantity of motion and the sound energy of the Ambisonic recordings. While no clear temporal correlations were found, we discovered a correlation between the spatial directionality of the micromotion and the sound direction of arrival. The results suggest a potential entrainment between the directionality of environmental sounds and human swaying patterns, which have not been thoroughly studied previously compared to the temporal or spectral features of indoor soundscapes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254521" class="vrtx-external-publication">
        <div id="vrtx-publication-10254521">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254521">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Pixasonics: An Image Sonification Toolbox for Python.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Cardoso, F. Amílcar; Vickers, Paul; Martins, Pedro &amp; Roddy, Stephen (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 30th International Conference on Auditory Display (ICAD 2025).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=E7F15E71-C7FE-4CDA-A5F2-F71F96B5254A">Department of Informatics Engineering, University of Coimbra, Portugal</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798991456210.</span>
                            
                <span class="vrtx-pages">s. 28–35.</span>
            <a href="https://hdl.handle.net/1853/79958">https://hdl.handle.net/1853/79958</a>.
            <a href="https://hdl.handle.net/11250/4102869">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Pixasonics is a new Python library for interactive image analysis and exploration through image sonification. It uses real-time audio and visualization to help uncover patterns in image data. With Pixasonics, users can launch one or more small web applications (running in a Jupyter Notebook), probe image data using various feature extraction methods, and map those feature vectors to synthesis parameters. The target users are researchers interested in exploring image and volumetric data and creative users who want an intuitive tool for experimental sound design. Pixasonics’ design aims to strike a balance between an easy-to-use web application with minimal boilerplate code necessary and a library that can be integrated into more advanced workflows. Real-time exploration is at the heart, but it can also be used to script non-real-time sonifications of large datasets. This paper presents Pixasonics, its structure, interface, and advanced features, and discusses preliminary feedback from biology researchers and music technologists.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391317" class="vrtx-external-publication">
        <div id="vrtx-publication-2391317">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391317">
                Riaz, Maham; Guo, Jinyue; Göksülük, Bilge Serdar &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Where is That Bird? The Impact of Artificial Birdsong in Public
Indoor Environments.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Seiça, Mariana &amp; Wirfs-Brock, Jordan (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    AM &#39;25: Proceedings of the 20th International Audio Mostly Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720659.</span>
                            
                <span class="vrtx-pages">s. 344–351.</span>
            doi: <a href="https://doi.org/10.1145/3771594.3771629">10.1145/3771594.3771629</a>.
            <a href="https://hdl.handle.net/11250/4977325">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores the effects of nature sounds, specifically bird sounds, on human experience and behavior in indoor public environments. We report on an intervention study where we introduced an interactive sound device to alter the soundscape. Phenomenological observations and a survey showed that participants noticed and engaged with the bird sounds primarily through causal listening; that is, they attempted to identify the sound source. Participants generally responded positively to the bird sounds, appreciating the calmness and surprise it brought to the environment. The analyses revealed that relative loudness was a key factor influencing the experience. A too-high sound level may feel unpleasant, while a too-low sound level makes it unnoticeable due to background noise. These findings highlight the importance of automatic level adjustments and considering acoustic conditions in soundscape interventions. Our study contributes to a broader discourse on sound perception, human interaction with sonic spaces, and the potential of auditory design in public indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391292" class="vrtx-external-publication">
        <div id="vrtx-publication-2391292">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391292">
                Sveen, Henrik Haraldsen; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cyclic Patterns and Spatial Orientations in Artificial Impulsive Autonomous Sensory Meridian Response (ASMR) Sounds.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Seiça, Mariana &amp; Wirfs-Brock, Jordan (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    AM &#39;25: Proceedings of the 20th International Audio Mostly Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720659.</span>
                            
                <span class="vrtx-pages">s. 124–131.</span>
            doi: <a href="https://doi.org/10.1145/3771594.3771651">10.1145/3771594.3771651</a>.
            <a href="https://hdl.handle.net/11250/4286194">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Autonomous Sensory Meridian Response (ASMR) is a tingling sensation in the neck and spine often triggered by specific sounds. This paper reports a study on the impact of different cyclic patterns and spatial orientations—defined here as the perceived directionality and motion of sound sources in a three-dimensional auditory space—on inducing ASMR experiences. The results demonstrate that both the type of cyclic pattern and the spatial orientation significantly influence the intensity and nature of ASMR experiences. Furthermore, the research explores synthesizing ASMR-inducing sounds while preserving key audio characteristics from acoustically recorded ASMR content. Through survey data analysis and regression modeling, distinct patterns emerge regarding the relationship between personality traits and ASMR experience. The findings contribute to a deeper understanding of ASMR as a sensory phenomenon and provide insights into the potential applications of artificially generated ASMR stimuli. Additionally, the research sheds light on the role of spatiality in ASMR experiences and the synthesis of ASMR-inducing sounds for future studies and practical applications</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391321" class="vrtx-external-publication">
        <div id="vrtx-publication-2391321">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391321">
                Riaz, Maham; Theodoridis, Ioannis; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        VentHackz: Exploring the Musicality of Ventilation Systems.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Cavdir, Doga &amp; Berthaut, Florent (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=031C5553-12A0-453E-B4FA-DC2B19B95BD2">The International Conference on New Interfaces for Musical Expression</a>.
                </span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.15698831">10.5281/zenodo.15698831</a>.
            <a href="https://hdl.handle.net/10852/119716">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Ventilation systems can be seen as huge examples of interfaces for musical expression, with the potential of merging sound, space, and human interaction. This paper explores conceptual similarities between ventilation systems and wind instruments and explores approaches to “hacking” ventilation systems with components that produce and modify sound. These systems enable the creation of unique sonic and visual experiences by manipulating airflow and making mechanical adjustments. Users can treat ventilation systems as musical interfaces by altering shape, material, and texture or augmenting vents. We call for heightened attention to the sound-making properties of ventilation systems and call for action (#VentHackz) to playfully improve the soundscapes of our indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2349747" class="vrtx-external-publication">
        <div id="vrtx-publication-2349747">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2349747">
                Riaz, Maham; Guo, Jinyue &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Spatial Audio Recordings from Commercially Available 360-degree Video Cameras.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Brooks, Anthony L.; Banakou, Domna &amp; Ceperkovic, Slavica (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 13th EAI International Conference on ArtsIT, Interactivity and Game Creation, ArtsIT 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=AD8FEF33-C155-4915-A7BF-A1BE33DDAC4D">Springer</a>.
                </span>
                <span class="vrtx-issn">ISSN 9783031972546.</span>
                            
                <span class="vrtx-pages">s. 160–172.</span>
            doi: <a href="https://doi.org/10.1007/978-3-031-97254-6_12">10.1007/978-3-031-97254-6_12</a>.
            <a href="https://hdl.handle.net/11250/3259963">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper investigates the spatial audio recording capabilities of various commercially available 360-degree cameras (GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S). A dedicated ambisonics audio recorder (Zoom H3VR) was used for comparison. Six action sequences were performed around the recording setup, including impulsive and continuous vocal and non-vocal stimuli. The audio streams were extracted from the videos and compared using spectrograms and anglegrams. The anglegrams show adequate localization in ambisonic recordings from the GoPro MAX and Zoom H3VR. All cameras feature undocumented noise reduction and audio enhancement algorithms, use different types of audio compression, and have limited audio export options. This makes it challenging to use the spatial audio data reliably for research purposes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2377129" class="vrtx-external-publication">
        <div id="vrtx-publication-2377129">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2377129">
                Grosz, Patrick Georg; Solberg, Ragnhild Torvanger; Katz, Jonah; Vu, Mai Ha; Jensenius, Alexander Refsum &amp; Patel-Grosz, Pritty
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        An outline of the narrative grammar of electronic dance music.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Musicae Scientiae.
                </span>
                <span class="vrtx-issn">ISSN 1029-8649.</span>
                            29(4),
                <span class="vrtx-pages">s. 556–575.</span>
            doi: <a href="https://doi.org/10.1177/10298649251321709">10.1177/10298649251321709</a>.
            <a href="https://hdl.handle.net/11250/4079640">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">We argue that electronic dance music (EDM) exhibits a parallel structural organization to that which has been proposed for cartoons (comics) after the model of hierarchical structure proposed in theoretical linguistics. According to this parallel, both systems are governed by general cognitive mechanisms for the narrative organization of tension and release, which are not modality-specific. We show that notions from visual narrative analysis, such as an Establisher–Initial–Peak–Release template, can be applied directly to EDM tracks as an Intro/Breakdown–Buildup–Core–Outro/Cut template. In doing so, we focus on how to formally define and operationalize relevant notions such as Breakdown, Buildup, and Core. As part of our analysis, we show that the scene-setting Establisher segments of visual narratives map onto two distinct categories in EDM: they correspond to intro sections at the beginning of a track and to breakdown sections in the middle of a track; we strengthen the analogy to visual narrative analysis by introducing refinements such as a pre-drop break that often occurs at the end of a buildup segment. To adjudicate between competing hypotheses on the hierarchical structure of a given EDM track, we demonstrate that analytical tests from linguistics and visual narrative analysis can be successfully applied. By introducing these analytical tools, this article sets the stage for further explorations in the linguistically informed analysis of the structure and meaning of EDM.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2292157" class="vrtx-external-publication">
        <div id="vrtx-publication-2292157">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2292157">
                Guo, Jinyue; Christodoulou, Anna-Maria; Laczko, Balint &amp; Glette, Kyrre
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        LVNS-RAVE: Diversified audio generation with RAVE and Latent Vector Novelty Search.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Li, Xiaodong &amp; Handl, Julia (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    GECCO &#39;24 Companion: Proceedings of the Genetic and Evolutionary Computation Conference Companion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400704956.</span>
                            
                <span class="vrtx-pages">s. 667–670.</span>
            doi: <a href="https://doi.org/10.1145/3638530.3654432">10.1145/3638530.3654432</a>.
            <a href="https://hdl.handle.net/11250/3455371">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Evolutionary Algorithms and Generative Deep Learning have been two of the most powerful tools for sound generation tasks. However, they have limitations: Evolutionary Algorithms require complicated designs, posing challenges in control and achieving realistic sound generation. Generative Deep Learning models often copy from the dataset and lack creativity. In this paper, we propose LVNS-RAVE, a method to combine Evolutionary Algorithms and Generative Deep Learning to produce realistic and novel sounds. We use the RAVE model as the sound generator and the VGGish model as a novelty evaluator in the Latent Vector Novelty Search (LVNS) algorithm. The reported experiments show that the method can successfully generate diversified, novel audio samples under different mutation setups using different pre-trained RAVE models. The characteristics of the generation process can be easily controlled with the mutation parameters. The proposed algorithm can be a creative tool for sound artists and musicians.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307431" class="vrtx-external-publication">
        <div id="vrtx-publication-2307431">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307431">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied Musicking Technologies: Inspired by Professor Marc Leman.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Maes, Pieter-Jan (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Liber Amicorum Marc Leman: A life in music, science, and technology.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Ghent University.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4968714">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In his seminal book “Embodied Music Cognition and Mediation Technology”, Marc Leman Leman (2008a) drew up a theoretical framework that has influenced a whole new generation of researchers, myself included. Building on a long tradition of systematic musicology, combined with ecological psychology
and modern technology, he convincingly set the direction for a fresh approach to scientific studies of musical experiences. In the following, I will reflect on some concepts that he raised in his discussion, and I will point out some that he left for others, like me, to explore in more detail.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2275344" class="vrtx-external-publication">
        <div id="vrtx-publication-2275344">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2275344">
                G?ksülük, Bilge Serdar
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        The clash between physical and digital realm: hybrid movement training during the pandemic.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Discover Education.
                </span>
                            3(1).
            doi: <a href="https://doi.org/10.1007/s44217-024-00158-y">10.1007/s44217-024-00158-y</a>.
            <a href="https://hdl.handle.net/11250/3330914">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This article is based on ethnographic research conducted in one of the physical training institutions that offer the Laban/Bartenieff Movement Studies (LBMS) certification program in 2021. Because of the COVID-19 pandemic crisis, the training was held in a hybrid format in which some participants were in the studio while others were attending remotely via Zoom. Zoom-mediated movement training for long hours revealed how the intervention of telematic technologies challenged practitioners’ sensorial experience and sense-making process. Moreover, bringing co-located and remote participants’ experiences together in the hybrid setting disclosed different modes of interaction dynamics in the studio and online. Overall, participants described their hybrid experience as a clash. In the article, starting from unfolding those clashes from the enactive perspective, I discuss how remote intercorporeality through an audio–video streaming system, Zoom, challenges participants’ sensorial experience and how remote interaction affects the shared sense-making process in the hybrid format setting.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2292179" class="vrtx-external-publication">
        <div id="vrtx-publication-2292179">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2292179">
                Guo, Jinyue; Riaz, Maham &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Four 360-Degree Cameras for Spatial Video Recording and Analysis.
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9789893520758.</span>
                            
            
            <a href="https://hdl.handle.net/10852/113954">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper reports on a desktop investigation and a lab experiment comparing the video recording capabilities of four commercially available 360-degree cameras: GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S. The four cameras all use different recording formats and settings and have varying video quality and software support. This makes it difficult to conduct analyses and compare between devices. We have implemented new functions in the Musical Gestures Toolbox (MGT) for reading and merging files from the different platforms. Using the capabilities of FFmpeg, we have also made a new function for converting between different 360-degree video projections and formats. This allows (music) researchers to exploit 360-degree video recordings using regular video-based analysis pipelines.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2309706" class="vrtx-external-publication">
        <div id="vrtx-publication-2309706">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2309706">
                Göksülük, Bilge Serdar
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Digital intervention into dramaturgical thoughts:
The dramaturgy of remote dance improvisation.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Parviainen, Pessi &amp; Rouhiainen, Leena (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Solvitur Ambulando &quot;solved by moving&quot; : dramaturgies of artistic research : proceedings of CARPA8.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Theatre Academy, University of the Arts.
                </span>
                <span class="vrtx-issn">ISBN 9789523530744.</span>
                            
            <a href="https://nivel.teak.fi/carpa8/digital-intervention-into-dramaturgical-thoughts-the-dramaturgy-of-remote-dance-improvisation/">https://nivel.teak.fi/carpa8/digital-intervention-into-dramaturgical-thoughts-the-dramaturgy-of-remote-dance-improvisation/</a>.
            <a href="https://hdl.handle.net/11250/3350640">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2333493" class="vrtx-external-publication">
        <div id="vrtx-publication-2333493">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2333493">
                Høffding, Simon; Hansen, Niels Christian &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music Research “in the Wild” – Introducing the MusicLab Copenhagen Special Collection.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            7.
            doi: <a href="https://doi.org/10.1177/20592043241294161">10.1177/20592043241294161</a>.
            <a href="https://hdl.handle.net/10852/115237">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This special collection of Music &amp; Science contains 111 articles. They thoroughly describe a particular instantiation of a research concert, namely the innovative and complex event MusicLab Copenhagen. This took place over 14 hours on October 26, 2021, in Copenhagen, Denmark. Working with The Danish String Quartet (DSQ), one of the world&#39;s best chamber ensembles, a research team from RITMO, complemented with researchers from several other European institutions, ran experiments and studied how mind and body are engaged during a concert. This was a unique opportunity to capture concurrent qualitative, behavioral, and physiological measurements in a concert hall, delicately balancing the scientific ideals of reliability and ecological validity.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2333450" class="vrtx-external-publication">
        <div id="vrtx-publication-2333450">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2333450">
                Campbell, Edward; Souza, Jonathan De &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Gestures, Actions and Play In Bjørn Heile&#39;s 3 × 10 Musical Actions For Three Socially Distanced Performers.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Tempo.
                </span>
                <span class="vrtx-issn">ISSN 0040-2982.</span>
                            78(310),
                <span class="vrtx-pages">s. 51–61.</span>
            
            <a href="https://hdl.handle.net/11250/4511735">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Bjørn Heile’s 3 × 10 Musical Actions for Three Socially Distanced Performers features frequent changes in musical material, playing style and instrumental combinations. Throughout a series of short sections, the performers play, sing, speak, conduct and move around, following instructions that appear on tablets. This article reflects on audiences’ experiences of the work and on musical actions more generally. We consider musical actions as short, coherent motion chunks and distinguish between several types of action that appear in the piece: gestures (communicative actions, with or without sound), reactions (where a player responds to another) and interactions (where players mutually coordinate). The musicians’ individual and collective actions create a sense of play: on the one hand, they seem free and depart from standard concert conventions; on the other hand, they seem to be following a set of rules, even if these rules are not explained to the audience. As such, we approach the piece via theories of play and relate it to earlier modernist musical games. Ultimately, 3 × 10 Musical Actions emphasises several aspects of musical actions, as social, functional, expressive, playful and embodied.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2333449" class="vrtx-external-publication">
        <div id="vrtx-publication-2333449">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2333449">
                Docherty, Claire; Iddon, Martin; Jensenius, Alexander Refsum; MacDonald, Raymond &amp; Stanley, Jane
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        ‘Are You Still There?’ Experiencing Sonic Bothy’s Verbaaaaatim.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Tempo.
                </span>
                <span class="vrtx-issn">ISSN 0040-2982.</span>
                            78(310).
            
            <a href="https://hdl.handle.net/11250/5038409">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Sonic Bothy is an inclusive experimental and new-music organisation with an ensemble of musicians with and without disabilities and neurodiversities. This article considers their audiovisual piece Verbaaaaatim (2020–21), its form marked by the context of its development and composition during the COVID-19 pandemic, using a set of interlayered perspectives that mirror the formal layers of the piece. Recorded in a single take, it comprises instrumental sounds, spoken words, written words, static and dynamic graphics and videos of the performers, aligned so that the piece seems consistently to flow onwards, although it is not always clear which element impels its forward motion. The article considers, in particular, Verbaaaaatim’s presentation of modes of embodied conviviality between its performers, the ways these find resonance in wider histories of experimental music and the ways in which its elements can be understood in an ecological framework as ‘sound actions’.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2269992" class="vrtx-external-publication">
        <div id="vrtx-publication-2269992">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2269992">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied music learning.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Schilhab, Theresa &amp; Groth, Camilla (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Embodied Learning and Teaching using the 4E Cognition Approach: Exploring Perspectives in Teaching Practices.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=FAE3940D-29AB-45F5-9190-6242B3BB7596">Routledge</a>.
                </span>
                <span class="vrtx-issn">ISBN 9781003341604.</span>
                            
            doi: <a href="https://doi.org/10.4324/9781003341604-21">10.4324/9781003341604-21</a>.
            <a href="https://hdl.handle.net/10852/111222">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This chapter presents a pedagogical approach based on the author’s experience teaching interactive music technology design from an embodied music cognition perspective. The “musicking quadrant” is introduced as a framework to understand the experiences of those who make music in real-time (performers) and non-real-time (instrument makers, composers, producers), and those who experience music in real-time (perceivers) and non-real-time (analysts). New technologies challenge these roles and allow for new types of musical engagement that align with the 4E cognition perspectives.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200393" class="vrtx-external-publication">
        <div id="vrtx-publication-2200393">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200393">
                Riaz, Maham
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        An Investigation of Supervised Learning in Music Mood Classification for Audio and MIDI.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Andreopoulou, Areti &amp; Boren, Braxton (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Engineering Society 155th Convention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=14B11A7E-F376-483D-8077-B3EFFB3CBDD2">Audio Engineering Society, Inc.</a>.
                </span>
                <span class="vrtx-issn">ISBN 9781713894667.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3664099">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This study aims to use supervised learning – specifically, support vector machines – as a tool for a music mood classification task. Four audio and MIDI datasets, each containing over four hundred files, were composed for use in the training and testing processes. Mood classes were formed according to the valence-arousal plane, resulting in the following: happy, sad, relaxed, and tense. Additional runs were also conducted with the linear discriminant analysis, a dimensionality reduction technique commonly used to better the performance of the classifier. The relevant audio and MIDI features were carefully selected for extraction. MIDI datasets for the same music generated better classification results than corresponding audio datasets. Furthermore, when music is composed with each mood associated with a particular key instead of mixed keys, the classification accuracy is higher.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200397" class="vrtx-external-publication">
        <div id="vrtx-publication-2200397">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200397">
                Riaz, Maham &amp; Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Using SuperCollider with OSC Commands for Spatial Audio Control in a Multi-Speaker Setup.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Andreopoulou, Areti &amp; Boren, Braxton (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Engineering Society 155th Convention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=14B11A7E-F376-483D-8077-B3EFFB3CBDD2">Audio Engineering Society, Inc.</a>.
                </span>
                <span class="vrtx-issn">ISBN 9781713894667.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3399808">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">With the ever-increasing prevalence of technology, its application in various music-related processes, such as music composition and performance, has become increasingly prominent. One fascinating area where technology finds utility is in music performance, offering opportunities for extensive sound exploration and manipulation. In this paper, we introduce an approach utilizing SuperCollider and Open Sound Control (OSC) commands in a multi-speaker setup, enabling spatial audio control for a truly interactive audio spatialization experience. We delve into the musicological dimensions of these distinct methods, examining their integration within a live performance setting to uncover their artistic and expressive potential. By merging technology and musicology, our research aims to unlock new avenues for immersive and captivating musical experiences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200192" class="vrtx-external-publication">
        <div id="vrtx-publication-2200192">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200192">
                Guo, Jinyue &amp; McFee, Brian
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Automatic Recognition of Cascaded Guitar Effects.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Serafin, Stefania; Fontana, Federico &amp; Willemsen, Silvin (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 26th International Conference on Digital Audio Effects.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                         Aalborg University Copenhagen.
                </span>
                            
                <span class="vrtx-pages">s. 189–195.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.7973536">10.5281/zenodo.7973536</a>.
            <a href="https://hdl.handle.net/11250/4043479">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper reports on a new multi-label classification task for guitar effect recognition that is closer to the actual use case of guitar effect pedals. To generate the dataset, we used multiple clean guitar audio datasets and applied various combinations of 13 commonly used guitar effects. We compared four neural network structures: a simple Multi-Layer Perceptron as a baseline, ResNet models, a CRNN model, and a sample-level CNN model. The ResNet models achieved the best performance in terms of accuracy and robustness under various setups (with or without clean audio, seen or unseen dataset), with a micro F1 of 0.876 and Macro F1 of 0.906 in the hardest setup. An ablation study on the ResNet models further indicates the necessary model complexity for the task.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200042" class="vrtx-external-publication">
        <div id="vrtx-publication-2200042">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200042">
                Riaz, Maham; Upham, Finn; Burnim, Kayla; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing inertial motion sensors for capturing human micromotion,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2023.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9789152773727.</span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.8316051">10.5281/zenodo.8316051</a>.
            <a href="https://hdl.handle.net/10852/106232">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents a study of the noise level of accelerometer data from a mobile phone compared to three commercially available IMU-based devices (AX3, Equivital, and Movesense) and a marker-based infrared motion capture system (Qualisys). The sensors are compared in static positions and for measuring human micromotion, with larger motion sequences as reference. The measurements show that all but one of the IMU-based devices capture motion with an accuracy and precision that is far below human micromotion. However, their data and representations differ, so care should be taken when comparing data between devices.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198422" class="vrtx-external-publication">
        <div id="vrtx-publication-2198422">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198422">
                Masu, Raul; Morreale, Fabio &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        The O in NIME: Reflecting on the Importance of Reusing and Repurposing Old Musical Instruments.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Ortiz, Miguel &amp; Marquez-Borbon, Adnan (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Universidad Autónoma Metropolitana.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4615889">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper, we reflect on the focus of “newness” in NIME research and practice and argue that there is a missing O (for “Old”) in framing our academic discourse. A systematic review of the last year’s conference proceedings reveals that most papers do, indeed, present new instruments, interfaces, or pieces of technology. Comparably few papers focus on the prolongation of existing NIMEs. Our meta-analysis identifies four main categories from these papers: (1) reuse, (2) update, (3) complement, and (4) long-term engagement. We discuss how focusing more on these four types of NIME development and engagement can be seen as an approach to increase sustainability.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198420" class="vrtx-external-publication">
        <div id="vrtx-publication-2198420">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198420">
                Karbasi, Seyed Mojtaba; Jensenius, Alexander Refsum; Godøy, Rolf Inge &amp; Tørresen, Jim
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Emerging Drumming Patterns in a Chaotic Dynamical System using ZRob.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Ortiz, Miguel &amp; Marquez-Borbon, Adnan (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Universidad Autónoma Metropolitana.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/106203">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">ZRob is a robotic system designed for playing a snare drum. The robot is constructed with a passive flexible spring-based joint inspired by the human hand. This paper describes a study exploring rhythmic patterns by exploiting the chaotic dynamics of two ZRobs. In the experiment, we explored the control configurations of each arm by trying to create unpredictable patterns. Over 200 samples have been recorded and analyzed. We show how the chaotic dynamics of ZRob can be used for creating new drumming patterns.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2061616" class="vrtx-external-publication">
        <div id="vrtx-publication-2061616">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2061616">
                Lesteberg, Mari &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        MICRO and MACRO - Developing New Accessible Musicking Technologies.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Iber, Michael &amp; Enge, Kajetan (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Audio Mostly 2022: What you hear is what you see? Perspectives on modalities in sound and music interaction.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        ACM Publications.
                </span>
                <span class="vrtx-issn">ISBN 9781450397018.</span>
                            
                <span class="vrtx-pages">s. 147–150.</span>
            doi: <a href="https://doi.org/10.1145/3561212.3561231">10.1145/3561212.3561231</a>.
            <a href="https://hdl.handle.net/11250/3486287">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the development of two musical instrument prototypes developed to explore how non-haptic music technologies can be accessed from a web browser and how they can offer accessibility for people with low fine motor skills. Two approaches to browser-based motion capture were developed and tested during an iterative design process. This was followed by observational studies of two user groups: one with low fine motor skills and one with normal motor skills. Contrary to our expectations, we found that avoiding the use of buttons and mice did not make the apps more accessible for the participants with low fine motor skills. Furthermore, motion speed was considered more important for people with low motor skills than the size of the control action. The most important finding is that browser-based musical instruments using sensor-based and video-based motion tracking are not only feasible but allow for reaching much larger groups of people than previously possible. This may ultimately lead to both more personalized and accessible musical experiences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2094145" class="vrtx-external-publication">
        <div id="vrtx-publication-2094145">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2094145">
                Remache-Vinueza, Byron; Trujillo-León, Andrés; Clim, Maria-Alena; Sarmiento-Ortiz, Fabián; Topon-Visarrea, Liliana &amp; Jensenius, Alexander Refsum
                    <a href="javascript:void(0);" title="Hent alle deltakere" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2094145/contributors', 'vrtx-publication-contributors-2094145')">
                    [Vis alle&nbsp;7&nbsp;forfattere av denne artikkelen]</a>
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mapping Monophonic MIDI Tracks to Vibrotactile Stimuli Using Tactile Illusions.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Saitis, Charalampos; Farkhatdinov, Ildar &amp; Papetti, Stefano (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Haptic and Audio Interaction Design.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=DC752087-7122-4D3A-9E4F-382AA2F39D2C">Springer Nature</a>.
                </span>
                <span class="vrtx-issn">ISBN 9783031150197.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3331117">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In this project, we propose an algorithm to convert musical features and structures extracted from monophonic MIDI files to tactile illusions. Mapping music to vibrotactile stimuli is a challenging process since the perceptible frequency range of the skin is lower than that of the auditory system, which may cause the loss of some musical features. Moreover, current proposed models do not warrant the correspondence between the emotional response to music and the vibrotactile version of it. We propose to use tactile illusions as an additional resource to convey more meaningful vibrotactile stimuli. Tactile illusions enable us to add dynamics to vibrotactile stimuli in the form of movement, changes of direction, and localization. The suggested algorithm converts monophonic MIDI files into arrangements of two tactile illusions: “phantom motion” and “funneling”. The validation of the rendered material consisted of presenting the audio rendered from MIDI files to participants and then adding the vibrotactile component to it. The arrangement of tactile illusions was also evaluated alone. Results suggest that the arrangement of tactile illusions evokes more positive emotions than negative ones. This arrangement was also perceived as more agreeable and stimulating than the original audio. Although musical features such as rhythm, tempo, and melody were mostly recognized in the arrangement of tactile illusions, it provoked a different emotional response from that of the original audio.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2268059" class="vrtx-external-publication">
        <div id="vrtx-publication-2268059">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2268059">
                Erdem, Cagri; Wallace, Benedikte &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        CAVI: A Coadaptive Audiovisual Instrument–Composition.
                </span>
                    <span class="vrtx-parent-contributors">
                            I McPherson, Andrew &amp; Frid, Emma (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=031C5553-12A0-453E-B4FA-DC2B19B95BD2">The International Conference on New Interfaces for Musical Expression</a>.
                </span>
                            
            doi: <a href="https://doi.org/10.21428/92fbeb44.803c24dd">10.21428/92fbeb44.803c24dd</a>.
            <a href="https://hdl.handle.net/11250/4771760">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1967503" class="vrtx-external-publication">
        <div id="vrtx-publication-1967503">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1967503">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Best versus Good Enough Practices for Open Music Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Empirical Musicology Review.
                </span>
                            16(1).
            doi: <a href="https://doi.org/10.18061/emr.v16i1.7646">10.18061/emr.v16i1.7646</a>.
            <a href="https://hdl.handle.net/11250/4488029">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Music researchers work with increasingly large and complex data sets. There are few established data handling practices in the field and several conceptual, technological, and practical challenges. Furthermore, many music researchers are not equipped for (or interested in) the craft of data storage, curation, and archiving. This paper discusses some of the particular challenges that empirical music researchers face when working towards Open Research practices: handling (1) (multi)media files, (2) privacy, and (3) copyright issues. These are exemplified through MusicLab, an event series focused on fostering openness in music research. It is argued that the &quot;best practice&quot; suggested by the FAIR principles is too demanding in many cases, but &quot;good enough practice&quot; may be within reach for many. A four-layer data handling &quot;recipe&quot; is suggested as concrete advice for achieving &quot;good enough practice&quot; in empirical music research.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1954360" class="vrtx-external-publication">
        <div id="vrtx-publication-1954360">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1954360">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reflections on the Development of the Musical Gestures Toolbox for Python.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Kantan, Prithvi Ravi; Paisa, Razvan &amp; Willemsen, Silvin (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Nordic Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=2B38F065-B3E6-4061-9F0C-0BA1287EEAFF">Aalborg Universitetsforlag</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/89331">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents the Musical Gestures Toolbox (MGT) for Python, a collection of modules targeted at researchers working with video recordings. The toolbox includes video visualization techniques such as creating motion videos, motion history images, and motiongrams. These visualizations allow for studying video recordings from different temporal and spatial perspectives. The toolbox also includes basic computer vision methods, and it is designed to integrate well with audio analysis toolboxes. The MGT was initially developed to analyze music-related body motion (of musicians, dancers, and perceivers) but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, pedagogy, psychology, and medicine.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/filter?fundingIdentifier=324003&amp;fundingSource=NFR">Se alle arbeider i NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10241911" class="vrtx-external-publication">
        <div id="vrtx-publication-10241911">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10241911">
                Göksülük, Bilge Serdar
            </span>(2024).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        Immersive Technologies and Their Implications in Theatre for Young Audiences: Challenges and Opportunities.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-ANTHOLOGYACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=974CB629-96E0-46F3-9E5C-64327BD7339D">Cândido</a>.
                </span>
                <span class="vrtx-isbn">ISBN 9786587602479.</span>
            
                <span class="vrtx-pages">15 s.</span>
            
            <a href="https://hdl.handle.net/11250/3796959">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Today’s immersive technology creates a real-like ambience, offering a unique interactive and multimodal experience onstage. These implications might look promising for new aesthetic forms. However, using them on the stage in this technology-driven world raises some concerns about their potential long-term impact on young people’s development. In this paper, I will critically evaluate the use of immersive technology in TYA, questioning whether performing arts can offer a new form of aesthetic experience.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2269536" class="vrtx-external-publication">
        <div id="vrtx-publication-2269536">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2269536">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sonic Design: Explorations Between Art and Science.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-ANTHOLOGYACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=DC752087-7122-4D3A-9E4F-382AA2F39D2C">Springer Nature</a>.
                </span>
                <span class="vrtx-isbn">ISBN 9783031578922.</span>
            
                <span class="vrtx-pages">347 s.</span>
            doi: <a href="https://doi.org/10.1007/978-3-031-57892-2">10.1007/978-3-031-57892-2</a>.
            <a href="https://hdl.handle.net/11250/4377830">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This edited volume is based on a selection of contributions at an international seminar organized in May 2022 to celebrate the achievements of Professor Godøy upon his retirement from the University of Oslo. The 17 chapters cover different approaches to sonic design practice and theory, giving readers historical backdrops and an overview of the current state of both artistic and scientific research in the field.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2046464" class="vrtx-external-publication">
        <div id="vrtx-publication-2046464">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2046464">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions: Conceptualizing Musical Instruments.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-MONOGRAPHACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=787501B7-4C33-4FC8-8689-95E5449219EC">MIT Press</a>.
                </span>
                <span class="vrtx-isbn">ISBN 9780262544634.</span>
            
                <span class="vrtx-pages">304 s.</span>
            doi: <a href="https://doi.org/10.7551/mitpress/14220.001.0001">10.7551/mitpress/14220.001.0001</a>.
            <a href="https://hdl.handle.net/10852/98282">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">

A techno-cognitive look at how new technologies are shaping the future of musicking.

“Musicking” encapsulates both the making of and perception of music, so it includes both active and passive forms of musical engagement. But at its core, it is a relationship between actions and sounds, between human bodies and musical instruments. Viewing musicking through this lens and drawing on music cognition and music technology, Sound Actions proposes a model for understanding differences between traditional acoustic “sound makers” and new electro-acoustic “music makers.”

What is a musical instrument? How do new technologies change how we perform and perceive music? What happens when composers build instruments, performers write code, perceivers become producers, and instruments play themselves? The answers to these pivotal questions entail a meeting point between interactive music technology and embodied music cognition, what author Alexander Refsum Jensenius calls “embodied music technology.” Moving between objective description and subjective narrative of his own musical experiences, Jensenius explores why music makes people move, how the human body can be used in musical interaction, and how new technologies allow for active musical experiences. The development of new music technologies, he demonstrates, has fundamentally changed how music is performed and perceived.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/filter?fundingIdentifier=324003&amp;fundingSource=NFR">Se alle arbeider i NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-3">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10428095" class="vrtx-external-publication">
        <div id="vrtx-publication-10428095">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428095">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        &quot;Soft&quot; and &quot;Hard&quot; research? Experiences from running a radically interdisciplinary research centre.
                </span>
                            
            <a href="https://www.uc.pt/ceis20/conferencias/alexander-jensenius-eixos-do-conhecimento-interdisciplinar/">https://www.uc.pt/ceis20/conferencias/alexander-jensenius-eixos-do-conhecimento-interdisciplinar/</a>.
            <a href="https://hdl.handle.net/11250/5504143">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In this presentation, Alexander Refsum Jensenius explores the practical and theoretical dimensions of running radically interdisciplinary research centres, focusing on his experiences with RITMO and the newly established MISHMASH Centre for AI and Creativity. He introduces the &quot;coffee machine&quot; philosophy, which advocates for physical colocation and social meeting points as essential tools to overcome institutional silos and bridge the diverse research motivations of fields like musicology, informatics, and psychology. Jensenius highlights the innovative potential of this approach through projects that translate artistic research into medical applications, such as using dance analysis software to screen infants for cerebral palsy and investigating the impact of musical stimuli on biological cells. He further details the MusicLab initiative, which scales data collection to full symphony orchestras to study embodied music cognition and human behaviour in real-life concert settings. The talk concludes by introducing MISHMASH, a national consortium dedicated to fostering human-centric AI that integrates artistic practice with technological development while navigating ethical challenges such as copyright and the preservation of cultural heritage.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428085" class="vrtx-external-publication">
        <div id="vrtx-publication-10428085">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428085">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hva er &quot;flytsonen&quot;?                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5504131">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">&quot;Kan kunst skape seg selv?&quot;, sp?r Abels T?rnpanel fra Kunsth?gskolen i Oslo (KHIO).
Send inn dine sp?rsm?l til v?re eksperter!</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428084" class="vrtx-external-publication">
        <div id="vrtx-publication-10428084">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428084">
                Jensenius, Alexander Refsum &amp; S?rum, Tuva Marie
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nesten alle lever i sin egen stille boble.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5504130">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Vi søker stillhet som aldri før, og hele 95 prosent av hodetelefonene som selges er støyreduserende.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10427622" class="vrtx-external-publication">
        <div id="vrtx-publication-10427622">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10427622">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nesten alle hodetelefoner er støyreduserende.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5503753">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Innslag på NRK nyhetsmorgen</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10416566" class="vrtx-external-publication">
        <div id="vrtx-publication-10416566">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10416566">
                Jensenius, Alexander Refsum &amp; Lindahl, Nikoline Riis
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ekspert om støydemping-trenden: – Vi lever i parallelle verdener.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Aftenposten.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5482266">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Støydemping er blitt normalen. Spørsmålet er hva som skjer når fellesskapets lyd forsvinner.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428086" class="vrtx-external-publication">
        <div id="vrtx-publication-10428086">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428086">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Permeating Art &amp; Science Collaboration.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5504132">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Panel discussion with Robertina Šebjanič, Anetta Mona Chișa, Alexander Refsum Jensenius. Moderated by Benedetta D&#39;Ettorre.

This panel examines the boundary zones where artistic and scientific approaches intersect, entangle, and permeate into one another. Bringing together practitioners working across more-than-human ecologies, technological imaginaries, and embodied research, the conversation will explore how meaningful collaboration can emerge from inter- and trans-disciplinary exchange.

Rather than framing art and science as opposites, we ask how their methods can become mutually generative; how artistic mindsets can expand scientific inquiry, and how scientific perspectives can deepen artistic experimentation. The session aims to discuss questions related to collaborative ethics, shared vocabularies, and the value of embracing “noise” as a catalyst for new forms of knowledge-making.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10346953" class="vrtx-external-publication">
        <div id="vrtx-publication-10346953">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10346953">
                J&aelig;re, Lisbet &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Verdens største musikkeksperiment viste at publikum holdt pusten samtidig.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Forskning.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5369674">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Publikum holder pusten samtidig, og når musikken blir emosjonell sitter alle helt stille sammen. Det avslører de første funnene fra verdens største musikkeksperiment.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10298804" class="vrtx-external-publication">
        <div id="vrtx-publication-10298804">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10298804">
                Snaprud, Per; Jensenius, Alexander Refsum; Endestad, Tor &amp; Wøien, Randi
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Concientia.
                </span>
                            
            <a href="https://happeningnext.com/event/samtale-om-bevissthet-og-det-%C3%A5-skape-eid3a0d68lba3">https://happeningnext.com/event/samtale-om-bevissthet-og-det-%C3%A5-skape-eid3a0d68lba3</a>.
            <a href="https://hdl.handle.net/11250/5330415">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">I forbindelse med utstillingen Conscientia på Gamle Munch arrangeres en samtale om bevissthet og det å skape.
Utgangspunktet for tema til utstillingen er bevissthet og forhold knyttet til det å skape.
En del av utstillingen tar utgangspunkt i en serie med selvportretter av Randi Wøien basert på MR bilder tatt av hennes eget hode. Bildene representerer et sett av intrikate mønstre i ulike størrelser og sammensetninger og kan refereres til ulike organer, andre vesener og ren natur. Samtidig er de en representasjon av kunstneren slik hun er satt sammen i sitt eget hode. Keramiker Jorid Krosse lager objekter med form og mønster som tar inspirasjon fra naturen og kan relateres til organiske strukturer, hoder og andre vesener. I samspill med maleriene vil de keramiske objektene settes i en relasjon til det kroppslige.
Samtalen om bevissthet og det å skape bruker utstillingen som et utgangspunkt til å få belyst hva den skapende prosessen kan bety for vår egen utvikling og hvordan hjernen fungerer og responderer på skapende prosesser. Tema for samtalen vil være forholdet mellom kunst og bevissthet, om relasjonen mellom maleri og objekt og om hvordan det å skape kunst kan påvirke vår forståelse av oss selv og omgivelsene.
For tiden forskes det mye på hva som faktisk skjer i hjernen når man skaper noe. Vi har fått med oss to av de fremste forskerne på temaet fra universitetet i Oslo.
Alexander Refsum Jensenius er professor i musikkteknologi ved Universitetet i Oslo, hvor han også leder RITMO Senter for tverrfaglige studier av rytme, tid og bevegelse og MishMash Senter for KI og kreativitet. Han forsker på hvordan lyd og musikk påvirker kropp og sinn, bevisst og ubevisst.
Tor Endestad er førsteamanuensis i kognitiv- og nevropsykologi på universitetet i Oslo og er tilknyttet Ritmo. Han leder FRONT neurolab og forsker på kognitiv psykologi og kognitiv nevrovitenskap med fokus på hjerneavbildningsmetodikk. Pågående forskningsprosjekter omfatter studier av basale mekanismer i oppfattelse av rytme og tid, oppmerksomhet og hukommelse.
Til å moderere samtalen har vi fått med oss Per Snaprud. Han er vitenskapsjournalist og før det hjerneforsker. Han arbeider i det Stockholm baserte magasinet Forskning og Framsteg og har tidligere vært virksom ved Dagens Nyheters og Sveriges Radios vitenskapsredaksjoner. Han er også forfatter av boken «Medvetandets återkomst, om hjärnan, kroppen och universum».
Victoria Johnson er fiolinist, underviser ved Institutt for musikkvitenskap og deltar i ulike forskningsprosjekter ved UiO. Hun har hatt solokonserter blant annet under Festspillene i Bergen, Ultima, Borealisfestivalen og Soundwaves i London. Hennes lidenskap for samtidsmusikk har resultert i flere bestillingsverk og plateinnspillinger. I denne sammenhengen vil hun spille musikk som er direkte komponert til bildene og objektene i utstillingen. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10298803" class="vrtx-external-publication">
        <div id="vrtx-publication-10298803">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10298803">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan musikk skape fred?                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ungdomsavisa.
                </span>
                            
            <a href="https://ungdomsavisa.com/index.php?artID=763&amp;navB=1">https://ungdomsavisa.com/index.php?artID=763&amp;navB=1</a>.
            <a href="https://hdl.handle.net/11250/5330413">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">I kveld var en rekke personligheter fra musikkmiljøet samlet til debatt ved Universitetet i Oslo. Temaet var «Kan musikk skape fred?». Blant deltakerne var Birgitte Grimstad og Lars Klevstrand, som har underholdt med musikk i flere tiår. Debatten ble ledet av blant annet professor i musikkvitenskap ved UiO, Alexander Refsum Jensenius.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10292554" class="vrtx-external-publication">
        <div id="vrtx-publication-10292554">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10292554">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Are we still needed?                </span>
                            
            doi: <a href="https://filmskolen.no/artikler/2025/ki-i-filmbransjen-2-0">https://filmskolen.no/artikler/2025/ki-i-filmbransjen-2-0</a>.
            <a href="https://hdl.handle.net/11250/5325011">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Den kunstige intelligensens frammarsj fortsetter å være den største kulturelle og samfunnsmessige omveltningen siden den industrielle revolusjonen. Siden fjorårets konferanse har det skjedd vanvittig mye, derfor inviterer vi igjen til en dag fylt med internasjonale nøkkelpersoner, banebrytende prosjekter og nye perspektiver på hvordan KI endrer måten vi utvikler, produserer og opplever film.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10292551" class="vrtx-external-publication">
        <div id="vrtx-publication-10292551">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10292551">
                Jensenius, Alexander Refsum; Bjerkestrand, Kari Anne Vadstensvik; Johnson, Victoria Christine ?rang &amp; Rao, Shabari
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stillness – Silence.
                </span>
                            
            doi: <a href="https://www.hf.uio.no/imv/english/research/news-and-events/events/Research-Forum/2025/research-forum-stillness-and-silence.html">https://www.hf.uio.no/imv/english/research/news-and-events/events/Research-Forum/2025/research-forum-stillness-and-silence.html</a>.
            <a href="https://hdl.handle.net/11250/5325010">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">What can be learned by standing still in silence? Dancers typically move, and musicians move to produce sound. In this research forum, we explore the opposite: musicians and dancers who stand still in silence.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10289987" class="vrtx-external-publication">
        <div id="vrtx-publication-10289987">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10289987">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk og kunstig intelligens.
                </span>
                    [TV].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        TV2.
                </span>
                            
            doi: <a href="https://play.tv2.no/nyheter/god-morgen-norge-y05mec7j?play=true">https://play.tv2.no/nyheter/god-morgen-norge-y05mec7j?play=true</a>.
            <a href="https://hdl.handle.net/11250/5322859">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10289261" class="vrtx-external-publication">
        <div id="vrtx-publication-10289261">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10289261">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        What is the role of AI in creative activities?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5322244">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10285832" class="vrtx-external-publication">
        <div id="vrtx-publication-10285832">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10285832">
                Guo, Jinyue; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cross-modal Analysis of Spatial-Temporal Auditory Stimuli and Human Micromotion when Standing Still in Indoor Environments (poster).
                </span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.17502603">10.5281/zenodo.17502603</a>.
            <a href="https://hdl.handle.net/11250/5319225">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283758" class="vrtx-external-publication">
        <div id="vrtx-publication-10283758">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283758">
                Duch, Michael Francis; Furunes, Alexander Eriksson; Jensenius, Alexander Refsum &amp; Olsen, Cecilie Sachs
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstnerisk forskning for en kompleks verden.
                </span>
                            
            doi: <a href="https://akademietforyngreforskere.no/wp-content/uploads/2025/11/Jubileumsbok-digital-3.pdf">https://akademietforyngreforskere.no/wp-content/uploads/2025/11/Jubileumsbok-digital-3.pdf</a>.
            <a href="https://hdl.handle.net/11250/5317572">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Kunstfagene spiller en viktig rolle i å utvide måten vi jobber med og forstår komplekse samfunnsproblemer. Likevel blir kunstfagene stadig oversett og nedprioritert i forskningspolitikken. Vi spør derfor: hva er, bør og kan kunstens rolle være i det norske forskningslandskapet?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10255055" class="vrtx-external-publication">
        <div id="vrtx-publication-10255055">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255055">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Technologies supporting research on music-related body motion.
                </span>
                            
            doi: <a href="https://www.liser.lu/events/EXPAR2025-09-18">https://www.liser.lu/events/EXPAR2025-09-18</a>.
            <a href="https://hdl.handle.net/11250/4403674">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">As researchers, we are increasingly using emerging technologies, such as multiple mobile eye tracking, virtual reality, and physiological indicators (e.g., heart rate and respiration) to study professionals’ individual and collaborative work practices. In this workshop, we will demonstrate how these technologies can be provided to professionals in various fields (e.g., education, healthcare, business, engineering, the arts) as a resource for self-reflection, enabling them to study and improve their own practices.

The goal of this workshop is to introduce and facilitate participants to experience novel approaches that use these emerging technologies and tools to help practitioners study their own skills and understand their learning processes. We will also show how focus groups and stimulated recall interviews can encourage and guide professionals to discover ways to incorporate these new technologies into their practice as resources for reflection and growth.

The workshop’s theme is educational practice and research, with a focus on showing how we can offer teachers theoretically driven and empirically validated methodologies for witnessing the micro-processes of collaborative mathematics learning. We will show and discuss how multiple mobile eye-tracking and virtual reality can be used in educational practice and for teacher training and professional development.

This approach and these emerging technologies are applicable not only in education, but also in all other fields of research that aim to study individual and collective practices, as well as professional learning, during the process of acquiring new skills or improving existing ones.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10255047" class="vrtx-external-publication">
        <div id="vrtx-publication-10255047">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255047">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization - Learn to use MG Toolbox.
                </span>
                            
            doi: <a href="https://www.liser.lu/events/EXPAR2025-09-19">https://www.liser.lu/events/EXPAR2025-09-19</a>.
            <a href="https://hdl.handle.net/11250/4159930">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is targeted at students and researchers working with video recordings You will learn to use MG Toolbox, a Python package with numerous tools for visualizing and analyzing video files. This includes visualization techniques such as motion videos, motion history images, and motiongrams; techniques that, in different ways, allow for looking at video recordings from different temporal and spatial perspectives. It also includes some basic computer vision analysis, such as extracting quantity and centroid of motion, and using such features in analysis. MG Toolbox for Python is a collection of high-level modules that generate all of the above-mentioned visualizations.The toolbox is relevant for everyone working with video recordings of humans, such as in linguistics, psychology, medicine, human-computer interaction, and educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254770" class="vrtx-external-publication">
        <div id="vrtx-publication-10254770">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254770">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Noen resultater fra tre år med forskningskonserter med tre orkestre.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3847365">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Denne presentasjonen oppsummerer resultater fra forskningsstudier på og med tre skandinaviske symfoniorkestre. I alle tilfeller har både kvalitative og kvantitative data blitt samlet inn på prøver og konserter i konsertsaler. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2359464" class="vrtx-external-publication">
        <div id="vrtx-publication-2359464">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2359464">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        What happens in the body when you stand still?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4848468">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Professor Alexander Refsum Jensenius will talk about his decade-long exploration of human micromotion. Motion data from the 365 standstill sessions he carried out during 2023 reveals lots of biomechanical noise, but also some interesting signals.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2381891" class="vrtx-external-publication">
        <div id="vrtx-publication-2381891">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2381891">
                Olsen, Cecilie Sachs; Jensenius, Alexander Refsum &amp; Duch, Michael Francis
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstnerisk forskning for en kompleks verden.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3469764">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2355958" class="vrtx-external-publication">
        <div id="vrtx-publication-2355958">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2355958">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskningsfronten - Mensblod mot Alzheimers.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P2 Abels tårn.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3599940">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Kan personlighetstrekket avgjøre om du liker å danse?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391318" class="vrtx-external-publication">
        <div id="vrtx-publication-2391318">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391318">
                Riaz, Maham
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Where is That Bird? The Impact of Artificial Birdsong in Public
Indoor Environments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4262079">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores the effects of nature sounds, specifically bird sounds, on human experience and behavior in indoor public environments. We report on an intervention study where we introduced an interactive sound device to alter the soundscape. Phenomenological observations and a survey showed that participants noticed and engaged with the bird sounds primarily through causal listening; that is, they attempted to identify the sound source. Participants generally responded positively to the bird sounds, appreciating the calmness and surprise it brought to the environment. The analyses revealed that relative loudness was a key factor influencing the experience. A too-high sound level may feel unpleasant, while a too-low sound level makes it unnoticeable due to background noise. These findings highlight the importance of automatic level adjustments and considering acoustic conditions in soundscape interventions. Our study contributes to a broader discourse on sound perception, human interaction with sonic spaces, and the potential of auditory design in public indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391322" class="vrtx-external-publication">
        <div id="vrtx-publication-2391322">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391322">
                Riaz, Maham
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        VentHackz: Exploring the Musicality of Ventilation Systems.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4318091">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Ventilation systems can be seen as huge examples of interfaces for musical expression, with the potential of merging sound, space, and human interaction. This paper explores conceptual similarities between ventilation systems and wind instruments and explores approaches to &quot;hacking&quot; ventilation systems with components that produce and modify sound. These systems enable the creation of unique sonic and visual experiences by manipulating airflow and making mechanical adjustments. Users can treat ventilation systems as musical interfaces by altering shape, material, and texture or augmenting vents. We call for heightened attention to the sound-making properties of ventilation systems and call for action (#VentHackz) to playfully improve the soundscapes of our indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391281" class="vrtx-external-publication">
        <div id="vrtx-publication-2391281">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391281">
                Sveen, Henrik; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cyclic Patterns and Spatial Orientations in Artificial
Impulsive Autonomous Sensory Meridian Response (ASMR) Sounds.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5094288">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Autonomous Sensory Meridian Response (ASMR) is a tingling sensation in the neck and spine often triggered by specific sounds. This paper reports a study on the impact of different cyclic patterns and spatial orientations—defined here as the perceived directionality and motion of sound sources in a three-dimensional auditory space—on inducing ASMR experiences. The results demonstrate that both the type of cyclic pattern and the spatial orientation significantly influence the intensity and nature of ASMR experiences. Furthermore, the research explores synthesizing ASMR-inducing sounds while preserving key audio characteristics from acoustically recorded ASMR content. Through survey data analysis and regression modeling, distinct patterns emerge regarding the relationship between personality traits and ASMR experience. The findings contribute to a deeper understanding of ASMR as a sensory phenomenon and provide insights into the potential applications of artificially generated ASMR stimuli. Additionally, the research sheds light on the role of spatiality in ASMR experiences and the synthesis of ASMR-inducing sounds for future studies and practical applications</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2395952" class="vrtx-external-publication">
        <div id="vrtx-publication-2395952">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2395952">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        What happens in the body when you stand still?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4282149">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Professor Alexander Refsum Jensenius will talk about his decade-long exploration of human micromotion. Motion data from the 365 standstill sessions he carried out during 2023 reveals lots of biomechanical noise, but also some interesting signals.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2383681" class="vrtx-external-publication">
        <div id="vrtx-publication-2383681">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2383681">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        KI og musikkens fremtid.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3930887">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Alexander Refsum Jensenius leder RITMO, Senter for tverrfaglig forskning på rytme, tid og bevegelse, med 60 ansatte. Han leter for tiden systematisk etter mulighetene og perspektivene KI kommer med. KI er en disruptiv teknologi som griper inn i etablerte næringsmodeller. Hvor er vi på vei? Mange av perspektivene kan virke overveldende. Det er imidlertid viktig å huske på at selv om maskiner nå er med på å utvikle seg selv, er det primært mennesker som vil utvikle også morgendagens teknologier. Han mener det er sentralt at vi i Norge er med på denne utviklingen. Her mener han kunst- og kulturfeltet har en unik mulighet til å bidra gjennom eksperimentell utforskning og kritisk refleksjon. Han mener vi vil se flere systemer som fokuserer på kontinuerlig samhandling mellom mennesker og maskiner, slik som når musikere improviserer. Men at man ikke kommer videre med KI uten at de får en kropp som kan sanse og handle. Og at KI-systemer vil kunne bli mer empatiske, noe som vil forbedre menneske-maskin-kommunikasjon, men som også reiser mange etiske problemstillinger.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2383680" class="vrtx-external-publication">
        <div id="vrtx-publication-2383680">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2383680">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Improvisasjon for muskelarmbånd.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3247304">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2383683" class="vrtx-external-publication">
        <div id="vrtx-publication-2383683">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2383683">
                Jemterud, Torkild; Jensenius, Alexander Refsum; Undheim, Vegard &amp; Røislien, Jo
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Wonderful Nachspiel med Torkild Jemterud.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4299979">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2383682" class="vrtx-external-publication">
        <div id="vrtx-publication-2383682">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2383682">
                Lerdahl, Erik; Buene, Eivind; Berg, Anna &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk, stillhet og kreativitet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3470643">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Alexander Refsum Jensenius har stått stille 10 min hver dag i ett år. Han kalles Professor Stillstand. Han leder et senter med 60 ansatte som forsker på rytme, tid og bevegelse, og vil gjerne forstå mer og dypere om hvordan lydene og inntrykkene av det vi omgir oss med påvirker oss. Hans første erkjennelse er at han tror verden ville kunne bli et bedre sted om alle stod stille 10 minutter hver dag. Hva gjør musikk og stillhet med oss selv, vårt velvære og vår kreativitet?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2366536" class="vrtx-external-publication">
        <div id="vrtx-publication-2366536">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2366536">
                Schau, Kristopher &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nysgjerrige på: rytmens hemmeligheter.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Nysgjerrige Norge.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5209031">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">I denne episoden besøker Kristopher forskningssenteret RITMO ved Universitetet i Oslo. Der forsker de på alt fra trommeroboter og mikromusikalske problemstillinger til hvordan vi påvirkes av ventilasjonslyd. Han møter senterleder Alexander Refsum Jensenius som forteller om forskning i skjæringspunktet mellom musikk, bevegelse, psykologi og robotikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2387009" class="vrtx-external-publication">
        <div id="vrtx-publication-2387009">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2387009">
                Riaz, Maham
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Art and Science of Immersive Sound Design in Games - What&#39;s the Secret?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4671577">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In modern games, sound design is far more than mere background noise—it conveys a story and shapes entire worlds. We will explore how gamification principles—interaction, feedback, progression, challenge, exploration, and motivation—integrate with sound design techniques such as spatial audio, adaptive mixing, and procedural audio to create responsive audio environments. Practical aspects of implementing game audio will be discussed within Unity (and Wwise).</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392257" class="vrtx-external-publication">
        <div id="vrtx-publication-2392257">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392257">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization - Learn to use MG Toolbox.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4548877">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is designed for students and researchers who work with video recordings. You will learn to use MG Toolbox, a Python package with numerous tools for visualizing and analyzing video files. This includes visualization techniques such as motion videos, motion history images, and motiongrams, which allow for viewing video recordings from different temporal and spatial perspectives in various ways. It also includes some fundamental computer vision analysis, such as extracting the quantity and centroid of motion, and using such features in analysis. MG Toolbox for Python is a collection of high-level modules that generate all of the visualizations mentioned above. The toolbox is relevant for everyone working with video recordings of humans, including linguists, psychologists, medical professionals, human-computer interaction specialists, and educators in the educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392258" class="vrtx-external-publication">
        <div id="vrtx-publication-2392258">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392258">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music, RITMO and AI.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4028668">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">An introduction to RITMO and ongoing research on the topic of music and AI for a workshop between researchers from the University of Oslo, Queen Mary University of London, and KTH Royal Institute of Technology.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10241909" class="vrtx-external-publication">
        <div id="vrtx-publication-10241909">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10241909">
                G&ouml;ks&uuml;lük, Bilge Serdar &amp; Tidemann, Aleksander
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Digital Collaboration in Dance and Music: Remote Interaction and Improvisation with Zoom and LoLa.
                </span>
                            
            <a href="https://www.ultima.no/en/ulysses-online-session-remote-connections-co-creation-across-distances">https://www.ultima.no/en/ulysses-online-session-remote-connections-co-creation-across-distances</a>.
            <a href="https://hdl.handle.net/11250/3871016">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This digital forum explores artistic collaboration through online platforms, examining how technology can facilitate sustainable and eco-friendly ways of creating music collectively. Participants will gain insights into artist-driven initiatives and practical tools for creative co-creation, regardless of geographical distance. Following three short presentations from invited contributors, there will be an open dialogue and exchange of experiences, allowing ULYSSES artists to share their own projects.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2329598" class="vrtx-external-publication">
        <div id="vrtx-publication-2329598">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2329598">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From Sound to Science: Open Science Practices at the RITMO Centre.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Pathos.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4461398">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">A PathOS interview about how Open Science (Open Access to publications, Open/FAIR data and software, collaborations with citizens) has made a positive or negative impact.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2224205" class="vrtx-external-publication">
        <div id="vrtx-publication-2224205">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2224205">
                Jensenius, Alexander Refsum &amp; Laczko, Balint
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4759772">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is targeted at students and researchers working with video recordings. You will learn to use MG Toolbox, a Python package with numerous tools for visualizing and analyzing video recordings. This includes visualization techniques such as motion videos, motion history images, and motiongrams; techniques that, in different ways, allow for looking at video recordings from different temporal and spatial perspectives. It also includes some basic computer vision analysis, such as extracting quantity and centroid of motion, and using such features in analysis.MG Toolbox for Python is a collection of high-level modules for generating all of the above-mentioned visualizations and analyses. This toolbox was initially developed to analyze music-related body motion but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, psychology, medicine, and educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307415" class="vrtx-external-publication">
        <div id="vrtx-publication-2307415">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307415">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Labprat #3: NM i stillstand.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3886952">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Klarer du å stå stille til favorittlåta di? Prøv selv og vinn 1000kr!

Folk sier ofte at det er umulig å ikke bevege seg til musikk, men stemmer det?

Onsdag 3. april kan du teste deg selv når professor Alexander Refsum Jensenius – også kjent som Professor stillstand – inviterer til «NM i stillstand» her på Popsenteret.

Vinneren kåres samme kveld på LAB.prat #3 med nettopp Alexander! Her får du også vite mer om hva som faktisk skjer i kroppen når vi hører på musikk. 

Som vanlig ledes kvelden av fasilitator og «MC» Dr. Kjell Andreas Oddekalv, også kjent som «Dr. Kjell» (eller hele Norges Kjelledegge som han selv liker å si) fra Hiphop orkesteret Sinsenfist. Sammen med Alexander inviterer han til en uformell samtale og Q&amp;A om kroppsrytmer og hvordan de påvirkes av omgivelsene våre. 

I tidsrommet mellom stillstandkonkurransen og LAB.prat er Popsenteret åpent og du er velkommen til å besøke utstillingen vår og alt den har å by på!</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307451" class="vrtx-external-publication">
        <div id="vrtx-publication-2307451">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307451">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        20 years of concert research at the University of Oslo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3393298">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In my talk I will give an overview of the concert research conducted in the fourMs Lab at the University of Oslo from the early 2000s to today. Over the years, we have explored and refined numerous data captures methods, from qualitative observation studies, interviews, and diaries to motion capture and physiological sensing. At the core has always been the attempt to shed light on the complexity of music performance. This includes understanding more about the subtleties of performer&#39;s sound-producing actions, sound-facilitating motion, and communicative and expressive gestures. It also includes the intricacies of inter-personal synchronization. Over the years, we have been able to expand from studying duos, trios, and quartets to full orchestras. Today, we have lots of data, some answers, and even more questions than when we started. An excellent starting point for future research.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307437" class="vrtx-external-publication">
        <div id="vrtx-publication-2307437">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307437">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization and Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4468971">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In this workshop, I will introduce video visualization as a method for understanding more about music-related body motion. Examples will be given of various methods implemented in the standalone application VideoAnalysis and the Musical Gestures Toolbox for Python.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307448" class="vrtx-external-publication">
        <div id="vrtx-publication-2307448">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307448">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Blikksporing av musikere og maler på scenen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4774308">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvor ser en kunstner som maler på scenen under en konsert? Forskere fra UiO forsøker å finne ut av dette ved hjelp av avansert blikksporingsteknologi.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307423" class="vrtx-external-publication">
        <div id="vrtx-publication-2307423">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307423">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Ambient project at RITMO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3493696">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The AMBIENT project aims to study how such elements influence people&#39;s bodily behaviors and how they feel about the rhythms in an environment. This will be done by studying how different auditory and visual stimuli combine to create rhythms in various settings.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307385" class="vrtx-external-publication">
        <div id="vrtx-publication-2307385">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307385">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From air guitar to self-playing guitars.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4270258">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">What can air guitar performance tell about people&#39;s musical experience and how does it relate to real guitar performance? Alexander Refsum Jensenius will tell about his decade-long research into music-related body motion of both performers and perceivers. He will also tell about how this has informed new performance paradigms, including the self-playing guitars that will be showcased at the festival.

?

Alexander Refsum Jensenius is a professor of music technology at the University of Oslo and Director of RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion. He studies how and why people move to music and uses this knowledge to create new music with untraditional instruments. He is widely published, including the books Sound Actions and A NIME Reader.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307419" class="vrtx-external-publication">
        <div id="vrtx-publication-2307419">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307419">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied music-related design.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4697973">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Abrahamson et al. (2022) recently called for a merging of Embodied Design-Based Research and Learning Analytics to establish a coherent and integrated focus on Multimodal Learning Analytics of Embodied Design. In Spring 2022, members of EDRL and selected international collaborators of the lab participated in “Rhythm Rising,” a workshop week hosted at University of Oslo’s RITMO Centre for Interdisciplinary Studies in Rhythm, Time, and Motion. The workshop featured activities for graduate students to learn the scientific research methodologies of gathering physical, physiological, and neurobiological data from study participants engaged in interactive learning of STEM content. The activities combined the respective expertise of Abrahamson (learning sciences) and Jensenius (embodied music cognition and technology) to investigate sensorimotor micro-processes hypothesized to form the cognitive basis of conceptual understandings, such as hand- and eye actions leading to the emergence of mathematical insight. Whereas the Oslo workshop spurred great enthusiasm among the graduate students, its duration only allowed time for initial data collection. Therefore, we would like to regather in Spring 2024 to continue our collaborative work and to share insights about data analysis, visualization, and interpretation. Concurrently, we’ll develop ideas for future joint research projects.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307446" class="vrtx-external-publication">
        <div id="vrtx-publication-2307446">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307446">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusicLab som et åpent forskningsprosjekt mellom RITMO og UB.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4144602">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">MusicLab er et samarbeid mellom RITMO og Universitetsbiblioteket. Målet er å utforske nye måter å samle inn og formidle musikkrelaterte forskningsdata på.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2323047" class="vrtx-external-publication">
        <div id="vrtx-publication-2323047">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2323047">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Åpen forskning muliggjør forskningsnær utdanning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4787234">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Selv om vi liker å si at vi driver med forskningsbasert utdanning, er organiseringen av forskning og utdanning gjerne plassert i ulike siloer, skriver Alexander Refsum Jensenius.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2305592" class="vrtx-external-publication">
        <div id="vrtx-publication-2305592">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305592">
                G&ouml;ks&uuml;lük, Bilge Serdar
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Conducting Semi-Structured Dance Research in Motion Capture Labs.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3340840">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2305588" class="vrtx-external-publication">
        <div id="vrtx-publication-2305588">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305588">
                G&ouml;ks&uuml;lük, Bilge Serdar
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Remote Intercorporeality Through Telematic Technologies.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4099968">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2305576" class="vrtx-external-publication">
        <div id="vrtx-publication-2305576">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305576">
                G&ouml;ks&uuml;lük, Bilge Serdar
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Remote Dance Improvisation Through Advanced Telematic Technologies.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3282141">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2305556" class="vrtx-external-publication">
        <div id="vrtx-publication-2305556">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305556">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Muskelmusikk.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4432418">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva skjer i musklene når vi forsøker å stå stille? Hvordan kan man lage musikk fra kroppen. I pausen på Forsker Grand Prix vil jeg underholde med et sceneshow hvor jeg utforsker interaktive muskelarmbånd og en musikkhanske.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2305591" class="vrtx-external-publication">
        <div id="vrtx-publication-2305591">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305591">
                G&ouml;ks&uuml;lük, Bilge Serdar
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Immersive Technologies in TYA: Bodily Concerns, Challenges and Opportunities.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3243685">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2305581" class="vrtx-external-publication">
        <div id="vrtx-publication-2305581">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305581">
                G&ouml;ks&uuml;lük, Bilge Serdar
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Immersive Technologies and Their Implications in Theatre for Young Audiences.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5177514">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2257455" class="vrtx-external-publication">
        <div id="vrtx-publication-2257455">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2257455">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvorfor trenger vi lisenser på data?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4040662">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">QualiFAIR huben inviterer til en presentasjon og en diskusjon om rettighetene til data og behov for lisenser for data og annet forskningsmateriale.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2234856" class="vrtx-external-publication">
        <div id="vrtx-publication-2234856">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2234856">
                Jensenius, Alexander Refsum &amp; Lilleeng, Sverre
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Professor stillstand.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4263051">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2349756" class="vrtx-external-publication">
        <div id="vrtx-publication-2349756">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2349756">
                Riaz, Maham
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Spatial Audio Recordings from Commercially Available 360-degree Video Cameras.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4399466">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper investigates the spatial audio recording capabilities of various commercially available 360-degree cameras (GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S). A dedicated ambisonics audio recorder (Zoom H3VR) was used for comparison. Six action sequences were performed around the recording setup, including impulsive and continuous vocal and non-vocal stimuli. The audio streams were extracted from the videos and compared using spectrograms and anglegrams. The anglegrams show adequate localization in ambisonic recordings from the GoPro MAX and Zoom H3VR. All cameras feature undocumented noise reduction and audio enhancement algorithms, use different types of audio compression, and have limited audio export options. This makes it challenging to use the spatial audio data reliably for research purposes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2349762" class="vrtx-external-publication">
        <div id="vrtx-publication-2349762">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2349762">
                Riaz, Maham &amp; Theodoridis, Ioannis
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ventilation hacking.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4350817">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">We examine innovative approaches to mitigate the issue of unwanted ventilation noise, transforming it from a disruptive element into a source of ambient or musical sound. We propose a range of solutions, from mechanical adjustments to acoustic treatments and digital interventions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2243455" class="vrtx-external-publication">
        <div id="vrtx-publication-2243455">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2243455">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stod stille hver dag i 10 minutter.
                </span>
                    [TV].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK Helgemorgen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3384163">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2292205" class="vrtx-external-publication">
        <div id="vrtx-publication-2292205">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2292205">
                Guo, Jinyue
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Four 360-Degree Cameras for Spatial Video Recording and Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4105957">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper reports on a desktop investigation and a lab experiment comparing the video recording capabilities of four commercially available 360-degree cameras: GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S. The four cameras all use different recording formats and settings and have varying video quality and software support. This makes it difficult to conduct analyses and compare between devices. We have implemented new functions in the Musical Gestures Toolbox (MGT) for reading and merging files from the different platforms. Using the capabilities of FFmpeg, we have also made a new function for converting between different 360-degree video projections and formats. This allows (music) researchers to exploit 360-degree video recordings using regular videobased analysis pipelines.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2319249" class="vrtx-external-publication">
        <div id="vrtx-publication-2319249">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2319249">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Can doing nothing tell us everything?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5146610">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Can doing nothing tell us everything? Meet Professor Alexander Refsum Jensenius, a music researcher exploring the deep connections between sound, space, and the human body. Through his fascinating studies on stillness and motion, Alexander has discovered surprising insights into how we interact with our environment.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2274681" class="vrtx-external-publication">
        <div id="vrtx-publication-2274681">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2274681">
                Jensenius, Alexander Refsum &amp; Jerve, Karoline Ruderaas
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Verdens største musikkeksperiment.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ballade.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4276767">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">I kveld møtes NRKs populærvitenskapelige radioprogram Abels tårn, KORK og forskningsprosjektet MusicLab for å måle hva som skjer mellom musikere og publikum når de utsettes for musikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392100" class="vrtx-external-publication">
        <div id="vrtx-publication-2392100">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392100">
                Jemterud, Torkild; Jensenius, Alexander Refsum; Løseth, Guro Engvig &amp; Holthe, Kolbjørn
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        ABELS KORK - Verdens største(?) musikkeksperiment.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5031209">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvordan påvirker musikk oss? Hva skjer i hjernen vår når vi hører en melodi vi liker – eller misliker? Hvorfor reagerer vi forskjellig på ulike typer musikk? Og hvordan klarer et helt orkester å spille plettfritt sammen? Og forresten: trenger de egentlig å ha en dirigent? Hver fredag svarer panelet i Abels tårn på alle slags vitenskapelige spørsmål, store og små, fra lytterne. Noen vil langt ut i verdensrommet, og andre er mer opptatt av hva som skjer på kjøkkenbenken. Men musikk er noe vi alle har et forhold til. Den er rundt oss hele tiden, og det er mye å undre seg over når det gjelder musikk og hvordan den taler til oss på dype personlige plan. Derfor har Abels tårn og KORK gått sammen med RITMO og Universitetsbiblioteket for å lage en musikalsk utgave av det populære vitenskapsprogrammet. Vi introduserer: Abels KORK!</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2370702" class="vrtx-external-publication">
        <div id="vrtx-publication-2370702">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2370702">
                Jensenius, Alexander Refsum; Riaz, Maham; Oldfield, Thomas L &amp; Juarez, Karenina
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO-studenter presenterer nye installasjoner.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4494725">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Studenter tilknyttet RITMO stiller ut prosjektene sine på Popsenteret: en interaktiv symaskin fra 1911, et lyttende og snakkende speil, og et interaktivt maleri. Hvordan kan slike objekter gi musikalske opplevelser?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2370700" class="vrtx-external-publication">
        <div id="vrtx-publication-2370700">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2370700">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Challenges and Possibilities of Open Music Data.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4184456">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The Sempre Autumn conference was an online student study day, held on Friday 8th November 2024, with a combination of student presentations, research speed dating, and a special session on open research featuring Professor Iain Brennan (University of Hull), Professor Tuomas Eerola (Durham University), and Professor Alexander Refsum Jensenius (University of Oslo). The event was open to doctoral students at any stage of their research and those thinking of applying for doctoral study. We invited proposals for short presentations (10 minutes + 5 for Q&amp;A) from doctoral students, on any aspect of music psychology or music education.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2305608" class="vrtx-external-publication">
        <div id="vrtx-publication-2305608">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305608">
                G&oslash;ks&uuml;l&uuml;k, Bilge Serdar; Tidemann, Aleksander &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Telematic Testing: One Performance in Three Locations.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4686167">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2307459" class="vrtx-external-publication">
        <div id="vrtx-publication-2307459">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307459">
                Jensenius, Alexander Refsum; Vo, Synne; Kelkar, Tejaswinee &amp; Kjus, Yngvar
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikksnakk: Musikk på Spotify - hvordan funker algoritmene?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4821486">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvorfor er det slik at plateselskaper ønsker at artister skal lage TikTok'er for å promotere musikken sin? Hva bestemmer hvilke musikkanbefalinger du får i Spotify? Og hvordan bruker plateselskapene dataene dine til å generere klikk og lytt? Bli med på en samtale om algoritmer på apper som TikTok og Spotify - og hvordan de påvirker musikksmaken din!

Til ? diskutere dette kommer:

- Synne Vo. Hun er en artist som slo igjennom på TikTok, og bruker plattformen aktivt for å promotere musikken sin. Hun kommer til panelet for å dele sine erfaringer med bransjen og appene.

- Yngvar Kjus. Han er professor i musikk og medier på UiO, og har forsket mye på populærmusikk, musikkproduksjon og musikkbransjen.

- Tejaswinee Kelkar. Hun er en sanger og forsker innen musikk og bevegelse. Hun har tidligere jobbet som dataanalytiker i Universal Music Norway og ved RITMO Center of Excellence ved Universitetet i Oslo.

Samtalen ledes av Alexander Refsum Jensenius. Han er professor i musikk ved Universitetet i Oslo, og leder av RITMO - Senter for tverrfaglig forskning på rytme, tid og bevegelse. Han prøver hele tiden å forstå mer om hvordan og hvorfor mennesker beveger seg til musikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392254" class="vrtx-external-publication">
        <div id="vrtx-publication-2392254">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392254">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interdisiplinæritet - et musikkperspektiv.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3909868">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Skal snakke om skjæringspunktet mellom psykologi, informatikk og musikk og arbeidet som foregår på instituttet han leder: RITMO, Senter for Interdisiplinære studier i rytme, tid og bevegelse ved Universitetet i Oslo. Alexander er både forsker og musiker. Han har en sammensatt bakgrunn bestående av musikk, informatikk, fysikk og matematikk og hans praktisk rettede forskning har bredt nedslagsfelt. Digitale verktøy som har blitt utviklet ved RITMO blir nå også brukt innen medisinsk forskning på ADHD og Cerebral Parese.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307457" class="vrtx-external-publication">
        <div id="vrtx-publication-2307457">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307457">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        20 Years of Piano Research at the University of Oslo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4045963">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In this lecture-recital, I will present piano-related research from the Department of Musicology over the last twenty years. I will also reflect on my role in this history, both as an artist and scientist. Finally, I will scrutinize the department&#39;s new Disklavier while performing various exploratory etudes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2281190" class="vrtx-external-publication">
        <div id="vrtx-publication-2281190">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281190">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hjernen i sentrum: Kunst.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4115688">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvorfor er noen musikalske og andre ikke? Hvordan har det seg at kunst kan treffe oss så voldsomt - og så ulikt! Ulike kunstneriske uttrykk som musikk, malerkunst, litteratur, dans og teater kommer uten fasit og tolkes vidt forskjellig fra person til person. Er det hjernen som styrer dette? Det er åpenbart at hjernen vår er aktiv og ikke passiv når vi opplever kunst. Hvorfor er det sånn? Gir kunstneriske opplevelser god hjernetrim? Er kunst viktig for hjernehelsen?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2281194" class="vrtx-external-publication">
        <div id="vrtx-publication-2281194">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281194">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusicLab as an Open Science innovation project between a research centre and the University library.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5162893">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2281187" class="vrtx-external-publication">
        <div id="vrtx-publication-2281187">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281187">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interdisciplinarity.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4898221">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2281188" class="vrtx-external-publication">
        <div id="vrtx-publication-2281188">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281188">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk, Data og KI.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5002588">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Musikk er en av de mest komplekse menneskelige kommunikasjonsformene som finnes og egner seg derfor godt for å utforske kunstig intelligens. Presentasjonen beskriver hvordan musikkforskere, psykologer og informatikere jobber sammen ved RITMO for å forstå mer om rytme, tid og bevegelse hos mennesker og maskiner.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2140991" class="vrtx-external-publication">
        <div id="vrtx-publication-2140991">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2140991">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Wishful thinking about CVs: Perspectives from a researcher.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3980287">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2140990" class="vrtx-external-publication">
        <div id="vrtx-publication-2140990">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2140990">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions: An Embodied approach to a Digital Organology.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5084371">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">What is an instrument in our increasingly electrified world? In this talk I will present a set of theoretical building blocks from my forthcoming book on &quot;musicking in an electronic world&quot;. At the core of the argument is the observation that the introduction of new music technologies has led to an increased separation between action and sound in musical performance. This has happened gradually, with pianos and organs being important early examples of instruments that introduced mechanical components between the performer and resonating objects. Today&#39;s network-based instruments represent an extreme case of a spatiotemporal dislocation between action and sound. They challenge our ideas of what an instrument can be, who can perform on them, and how they should be analyzed. In the lecture I will explain how we can use the concepts of action-sound couplings and mappings to structure our thinking about such instruments. This will be used at the heart of a new organology that embraces the qualities of both acoustic and electroacoustic instruments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2146554" class="vrtx-external-publication">
        <div id="vrtx-publication-2146554">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2146554">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4157847">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">What is an instrument in our increasingly electrified world? In this talk I will present a set of theoretical building blocks from my recent book &quot;Sound Actions&quot;. At the core of the argument is the observation that the introduction of new music technologies has led to an increased separation between action and sound in musical performance. This has happened gradually, with pianos and organs being important early examples of instruments that introduced mechanical components between the performer and resonating objects. Today&#39;s network-based instruments represent an extreme case of a spatiotemporal dislocation between action and sound. They challenge our ideas of what an instrument can be, who can perform on them, and how they should be analyzed. In the lecture I will explain how we can use the concepts of action-sound couplings and mappings to structure our thinking about such instruments. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2140993" class="vrtx-external-publication">
        <div id="vrtx-publication-2140993">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2140993">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Explorations of human micromotion through standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3551789">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I will stand still for ten minutes around noon every day, in a different room each day. The aim is to collect data about my micromotion and compare it to the qualities of the environment. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for conscious and unconscious control of musical sounds. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2140994" class="vrtx-external-publication">
        <div id="vrtx-publication-2140994">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2140994">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4483927">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">What is an instrument in our increasingly electrified world? In this talk I will present a set of theoretical building blocks from my forthcoming book on &quot;musicking in an electronic world&quot;. At the core of the argument is the observation that the introduction of new music technologies has led to an increased separation between action and sound in musical performance. This has happened gradually, with pianos and organs being important early examples of instruments that introduced mechanical components between the performer and resonating objects. Today&#39;s network-based instruments represent an extreme case of a spatiotemporal dislocation between action and sound. They challenge our ideas of what an instrument can be, who can perform on them, and how they should be analyzed. In the lecture I will explain how we can use the concepts of action-sound couplings and mappings to structure our thinking about such instruments. This will be used at the heart of a new organology that embraces the qualities of both acoustic and electroacoustic instruments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202439" class="vrtx-external-publication">
        <div id="vrtx-publication-2202439">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202439">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions: Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4981052">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">How do new technologies change how we perform and perceive music? What happens when composers build instruments, performers write code, perceivers become producers, and instruments play themselves? These are questions addressed in the new book by Professor Alexander Refsum Jensenius: Sound Actions: Conceptualizing Musical Instruments published by the MIT Press.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202432" class="vrtx-external-publication">
        <div id="vrtx-publication-2202432">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202432">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Explorations of human micromotion through standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4025813">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I will stand still for ten minutes around noon every day, in a different room each day. The aim is to collect data about my micromotion and compare it to the qualities of the environment. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for conscious and unconscious control of musical sounds.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202435" class="vrtx-external-publication">
        <div id="vrtx-publication-2202435">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202435">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions - Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5065826">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2202429" class="vrtx-external-publication">
        <div id="vrtx-publication-2202429">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202429">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Human Micromotion Through Standing Still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3725477">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Moving slowly likely puts us into a special state of mind. Subjective reports from various practices including dance, Tai Chi and walking meditation suggest that slow movements can bring participants into a special state involving increased relaxation and awareness. Interestingly, relatively little research has been performed specifically to understand the underlying mechanisms and the possible applications of human slow movement. One reason might be that slow movements are not common in day-to-day life: when we want to move – for example to pick up our cup of coffee - we usually want to do it now. Some evidence suggests that humans tend to avoid moving slowly in different tasks, for example, when improvising movements together. The goal of this meeting is to bring together scholars and practitioners interested in slow movement, and to foster interdisciplinary research on this somewhat neglected topic. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202443" class="vrtx-external-publication">
        <div id="vrtx-publication-2202443">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202443">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tverrfaglig forskning på rytme, tid og bevegelse.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3512336">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">RITMO er et unikt SFF på grunn av sin radikalt tverrfaglige oppbygning. Hvordan fungerer det i praksis?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200281" class="vrtx-external-publication">
        <div id="vrtx-publication-2200281">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200281">
                Riaz, Maham
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Design in Unity: Immersive Audio for Virtual Reality Storytelling.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4338498">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Research talk on sound design for games and immersive environments. The Unity game engine is used for environmental modeling. The Oculus Spatializer plugin provides control over binaural spatialization with native head related transfer functions (HRTF). Game scenes included C# scripts, which accounted for intermittent emitters (randomly triggered sounds of nature, critters and birds), crossfades, occlusion and raycasting. In the mixing stage, mixer groups, mixer snapshsots, snapshot triggers, SFX reverb sends, and low/high-pass filters were some of the tools demonstrated.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2188583" class="vrtx-external-publication">
        <div id="vrtx-publication-2188583">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2188583">
                Jensenius, Alexander Refsum &amp; Tytko, James
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Emerging tech creates music from dance movements.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        The Naked Scientists.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3407299">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Listen to the melodies composed with the help of motion capture body suits...</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2183304" class="vrtx-external-publication">
        <div id="vrtx-publication-2183304">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2183304">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Still Standing: The effects of sound and music on people standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4893857">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I have been standing still for ten minutes around noon every day, in a different room each day. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for the conscious and unconscious control of musical sounds.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2183301" class="vrtx-external-publication">
        <div id="vrtx-publication-2183301">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2183301">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Still Standing: The effects of sound and music on people standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4375585">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I have been standing still for ten minutes around noon every day, in a different room each day. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for the conscious and unconscious control of musical sounds.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2106038" class="vrtx-external-publication">
        <div id="vrtx-publication-2106038">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2106038">
                Jensenius, Alexander Refsum &amp; Poutaraud, Joachim
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4733516">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is targeted at students and researchers working with video recordings. Even though the workshop will be based on quantitative tools, the aim is to provide solutions for qualitative research. This includes visualization techniques such as motion videos, motion history images, and motiongrams, which, in different ways, allow for looking at video recordings from different temporal and spatial perspectives. It also includes basic computer vision analysis modules, such as extracting quantity and centroid of motion, and using such features in analysis.

The participants will learn to use the Musical Gestures Toolbox for Python, a collection of high-level modules for easily generating all of the above-mentioned visualizations and analyses. This toolbox was initially developed for analyzing music-related body motion but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, psychology, medicine, and educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2172754" class="vrtx-external-publication">
        <div id="vrtx-publication-2172754">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2172754">
                Olaisen, Sofie Retterstøl; Jensenius, Alexander Refsum &amp; Vuoskoski, Jonna Katariina
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Una må danse når ho høyrer musikk: – Eit urgamalt instinkt.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3704477">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Urgamle instinkt blir sett i sving når hjernen din oppfattar musikk. No kan forskarane også sjå danselysta i augo dine.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2180726" class="vrtx-external-publication">
        <div id="vrtx-publication-2180726">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180726">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ny teknologi vil alltid endre musikken.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-READEROPINION">
                        Aftenposten (morgenutg. : trykt utg.).
                </span>
                <span class="vrtx-issn">ISSN 0804-3116.</span>
                            
            
            <a href="https://hdl.handle.net/11250/5070106">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Musikerne forsvant ikke med grammofonen, det gjør de ikke nå heller.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2180713" class="vrtx-external-publication">
        <div id="vrtx-publication-2180713">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180713">
                Göksülük, Bilge Serdar
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Implications of Laban/Bartenieff Movement Studies in the Field of Dance Anthropology.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4186821">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In my presentation, I will discuss the application of LBMS in the field of dance anthropology and provide insights into its future implications. While kinetography (Labanotation) is commonly recognized and used in dance anthropology, the embodied aspects of Laban’s work are often overlooked. Therefore, I will focus on the embodied aspects of LBMS in dance anthropology, rather than just notation. To start, I will provide a general overview of how dance analysis is understood within the discipline of dance anthropology. Then, I will argue how LBMS impacts the discourse of dance analysis. In the second part of my presentation, I will bolster my argument with an example by analyzing a short segment of the Caucasian folk dance &#39;Zafak&#39; performed by the Nalmes State Folk Dance Company of Adygea. Through this example, I aim to demonstrate how dance analysis using LBMS can contribute to anthropological research.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200163" class="vrtx-external-publication">
        <div id="vrtx-publication-2200163">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200163">
                Riaz, Maham
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        An Investigation of Supervised Learning in Music Mood Classification for Audio and MIDI.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4798154">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This study aims to use supervised learning – specifically, support vector machines – as a tool for a music mood classification task. Four audio and MIDI datasets, each containing over four hundred files, were composed for use in the training and testing processes. Mood classes were formed according to the valence-arousal plane, resulting in the following: happy, sad, relaxed, and tense. Additional runs were also conducted with the linear discriminant analysis, a dimensionality reduction technique commonly used to better the performance of the classifier. The relevant audio and MIDI features were carefully selected for extraction. MIDI datasets for the same music generated better classification results than corresponding audio datasets. Furthermore, when music is composed with each mood associated with a particular key instead of mixed keys, the classification accuracy is higher.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2180718" class="vrtx-external-publication">
        <div id="vrtx-publication-2180718">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180718">
                Göksülük, Bilge Serdar
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Challenges and Possibilities of The Hybrid Format Creative Process.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3786674">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2180715" class="vrtx-external-publication">
        <div id="vrtx-publication-2180715">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180715">
                Göksülük, Bilge Serdar
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Phenomenological Inquiry of Movement as a Methodology in Performing Arts Education.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4138011">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2180729" class="vrtx-external-publication">
        <div id="vrtx-publication-2180729">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180729">
                Göksülük, Bilge Serdar
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied Knowledge Production Through Telematics in the Hybrid Realm.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4928914">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2180730" class="vrtx-external-publication">
        <div id="vrtx-publication-2180730">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180730">
                Göksülük, Bilge Serdar
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Digital Intervention Into Dramaturgical Thoughts.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3880643">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2180721" class="vrtx-external-publication">
        <div id="vrtx-publication-2180721">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180721">
                Göksülük, Bilge Serdar
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hybrid Format Movement Training Under the Pandemic Measures: A Clash Between Physical and Digital Realm.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3562146">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2180731" class="vrtx-external-publication">
        <div id="vrtx-publication-2180731">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180731">
                Göksülük, Bilge Serdar
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Performative Quality of Aesthetics in Bio-Cultural Paradigm.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4039444">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2192960" class="vrtx-external-publication">
        <div id="vrtx-publication-2192960">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192960">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        How Findable, Accessible, Interoperable and Reusable data enables research-led education.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4209609">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">FAIR data is an essential component of the open research ecosystem. In this article, Alexander Refsum Jensenius argues that &quot;FAIRification&quot; can also benefit research-based and research-led education, providing opportunities to bring together different university missions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2181261" class="vrtx-external-publication">
        <div id="vrtx-publication-2181261">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2181261">
                Jensenius, Alexander Refsum &amp; Tidemann, Grethe
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cristin forsvinner. Uklart hva som blir bedre i det nye systemet.                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Uniforum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3232429">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Cristin, det nasjonale systemet for forskningsdokumentasjon, skal erstattes av Nasjonalt vitenarkiv. Men hva som blir bedre i det nye systemet kan verken IT-direktøren eller forskningsdirektøren ved UiO svare på.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200070" class="vrtx-external-publication">
        <div id="vrtx-publication-2200070">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200070">
                Guo, Jinyue
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Automatic Recognition of Cascaded Guitar Effects.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4511359">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200050" class="vrtx-external-publication">
        <div id="vrtx-publication-2200050">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200050">
                Riaz, Maham; Upham, Finn; Burnim, Kayla; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing inertial motion sensors for capturing human micromotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5122145">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents a study of the noise level of accelerometer data from a mobile phone compared to three commercially available IMU-based devices (AX3, Equivital, and Movesense) and a marker-based infrared motion capture system (Qualisys). The sensors are compared in static positions and for measuring human micromotion, with larger motion sequences as reference. The measurements show that all but one of the IMU-based devices capture motion with an accuracy and precision that is far below human micromotion. However, their data and representations differ, so care should be taken when comparing data between devices.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200082" class="vrtx-external-publication">
        <div id="vrtx-publication-2200082">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200082">
                Riaz, Maham
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Using SuperCollider with OSC Commands for Spatial Audio Control in a Multi-Speaker Setup.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3615973">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">With the ever-increasing prevalence of technology, its application in various music-related processes, such as music composition and performance, has become increasingly prominent. One fascinating area where technology finds utility is in music performance, offering opportunities for extensive sound exploration and manipulation. In this paper, we introduce an approach utilizing SuperCollider and Open Sound Control (OSC) commands in a multi-speaker setup, enabling spatial audio control for a truly interactive audio spatialization experience. We delve into the musicological dimensions of these distinct methods, examining their integration within a live performance setting to uncover their artistic and expressive potential. By merging technology and musicology, our research aims to unlock new avenues for immersive and captivating musical experiences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2131190" class="vrtx-external-publication">
        <div id="vrtx-publication-2131190">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2131190">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring large datasets of human, music-related standstill.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4436546">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I will stand still for ten minutes around noon every day, in a different room each day. The aim is to collect data about my micromotion and compare it to the qualities of the environment. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for conscious and unconscious control of musical sounds. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2127653" class="vrtx-external-publication">
        <div id="vrtx-publication-2127653">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2127653">
                Jensenius, Alexander Refsum &amp; Rosenberg, Ingvild
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Unik forskningskonsert.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P1.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4857891">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2202456" class="vrtx-external-publication">
        <div id="vrtx-publication-2202456">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202456">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Introducing MusicLab.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3773555">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In 2021, one of the world’s finest string quartets, The Danish String Quartet (DSQ), and a large team of international researchers based at RITMO, co-hosted MusicLab Copenhagen – a groundbreaking event where DSQ performed their best repertoire while researchers experimented with, measured, and analyzed the experiences and behavior of musicians and audience. Some of the questions we tried to answer were: Do we become one grand “we” when absorbed in music together? How do we synchronize our bodily rhythms with the music during a concert? As an innovative musical and scientific format, the concert has been widely reported and won “Event of the Year” by the Danish National Broadcasting Corporation (DR P2). Now, the researchers have completed their analyses, and we are excited to share findings in a hybrid launch event.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2127652" class="vrtx-external-publication">
        <div id="vrtx-publication-2127652">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2127652">
                Haaland, Tonette N. &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        SSO deltar i forskning: – Skal finne ut hvordan musikk påvirker oss.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Rogalands avis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4789083">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Stavanger Symfoniorkester (SSO) inviterer elever på 5.-10. trinn på konsert, for å gjennomføre forskning.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2127649" class="vrtx-external-publication">
        <div id="vrtx-publication-2127649">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2127649">
                Jensenius, Alexander Refsum &amp; Burnim, Kayla
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskere inntok Konserthuset.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Stavanger Aftenblad.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3741506">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hundrevis av elever kom for å høre på Stavanger symfoniorkester. Mens orkesteret spilte, var musikerne, dirigenten og publikum del av et unikt forskningsprosjekt.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2139082" class="vrtx-external-publication">
        <div id="vrtx-publication-2139082">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2139082">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Rhythmic Data Science.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3408980">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Rhythm is everywhere, from how we walk, talk, dance and play to telling stories about our past and even predicting the future. Rhythm is key to how we interact with our world. Our heartbeat, nervous system, and other bodily cycles work through rhythm. As such, rhythm is a crucial aspect of human action and perception, and it is in complex interaction with the world&#39;s cultural, biological and mechanical rhythms. At RITMO, they research rhythmic phenomena and their complex relationships with the rhythms of human bodies and brains. In the talk, Alexander will present examples of how they record, synchronize, and analyze data of complex, rhythmic human behavior, such as real-world concerts.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198424" class="vrtx-external-publication">
        <div id="vrtx-publication-2198424">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198424">
                Bukvic, Ivica Ico; Jensenius, Alexander Refsum; Wittman, Hollis &amp; Masu, Raul
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Implementing the new template for NIME music proceedings with the community.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4600332">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">We will analyze a new possible template for NIME submissions which would simplify the integration of NIME music performances in the COMPEL, a database which facilitates navigation across different categories (pieces, persons, instruments). The template emerges from a workshop run last year at NIME about the structure of COMPEL and the process of entering all performances presented last year. From this workshop we expect to improve the template and validate it with a community.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198418" class="vrtx-external-publication">
        <div id="vrtx-publication-2198418">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198418">
                Karbasi, Seyed Mojtaba; Jensenius, Alexander Refsum; Godøy, Rolf Inge &amp; Tørresen, Jim
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Emerging Drumming Patterns in a Chaotic Dynamical System using ZRob.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3524390">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">ZRob is a robotic system designed for playing a snare drum. The robot is constructed with a passive flexible spring-based joint inspired by the human hand. This paper describes a study exploring rhythmic patterns by exploiting the chaotic dynamics of two ZRobs. In the experiment, we explored the control configurations of each arm by trying to create un- predictable patterns. Over 200 samples have been recorded and analyzed. We show how the chaotic dynamics of ZRob can be used for creating new drumming patterns.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198337" class="vrtx-external-publication">
        <div id="vrtx-publication-2198337">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198337">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Innovasjon og åpen forskning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4308136">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2198421" class="vrtx-external-publication">
        <div id="vrtx-publication-2198421">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198421">
                Masu, Raul; Morreale, Fabio &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The O in NIME: Reflecting on the Importance of Reusing and Repurposing Old Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4521771">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper, we reflect on the focus of “newness” in NIME research and practice and argue that there is a missing O (for “Old”) in framing our academic discourse. A systematic review of the last year’s conference proceedings reveals that most papers do, indeed, present new instruments, interfaces, or pieces of technology. Comparably few papers focus on the prolongation of existing NIMEs. Our meta-analysis identifies four main categories from these papers: (1) reuse, (2) update, (3) complement, and (4) long-term engagement. We discuss how focusing more on these four types of NIME development and engagement can be seen as an approach to increase sustainability.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2182244" class="vrtx-external-publication">
        <div id="vrtx-publication-2182244">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2182244">
                Jensenius, Alexander Refsum &amp; Zürn, Christof
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Standing still with Alexander Refsum Jensenius.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        The Power of Music Thinking.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4050287">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">What is the use of standing still for 10 minutes? I was asking myself when I saw a post on social media. It was a double picture of a man with a mobile phone around his neck displaying some data, and another picture showed the view he saw at that moment. I learned that he stood there for 10 minutes without any movement, listening to the sound that was already there. There were many pictures like this, and I decided to get in contact.

So, today, we are in Oslo. We speak with Alexander Refsum Jensenius, a professor of music technology at the University of Oslo, a book author, a music researcher and researching musician working in the fields of embodied music cognition and new interfaces for musical expression. 

Alexander shares with us his experiences while performing and testing with artistic methods of embodied listening and how people experience music and sound. This goes from experiments with and without the conductor of a Symphony Orchestra to the sounds of our kitchen appliances.

We talk about his motion capture lab, where a person’s exact location and micro-movements can be detected while they hear different kinds of music, and how the researchers can understand what moves them. 

Alexander shares insights about the Norwegian Championship of Stand Still, where until now, 1000s of people have participated, and the winner is the person with the lowest average velocity on standing the stillest over some time. 

Alexander explains the interplay of body and mind and reveals some secrets on how to move people, for example, on the dance floor or to calm them down. It all has to do with our bpm, the average heartbeat of about 60 beats a minute. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198335" class="vrtx-external-publication">
        <div id="vrtx-publication-2198335">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198335">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Observing spaces while standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4024536">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I stand still for ten minutes around noon every day, in a different room each day. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. Previously, I have been interested in the impact of music. Now, I am listening to ventilation systems, elevators, and people walking and talking and reflecting on how they influence my body and
mind. The aim is to understand more about the rhythms of the environment.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198334" class="vrtx-external-publication">
        <div id="vrtx-publication-2198334">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198334">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3875060">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">RITMO er et tverrfaglig senter som ønsker å avdekke de kognitive mekanismene som ligger til grunn for menneskelig rytme, i musikk, bevegelse og audiovisuelle medier.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2192098" class="vrtx-external-publication">
        <div id="vrtx-publication-2192098">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192098">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvordan en videreutvikling av vurderingssystemet endrer hvordan man jobber faglig.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4901820">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2192099" class="vrtx-external-publication">
        <div id="vrtx-publication-2192099">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192099">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk og kunstig intelligens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4190551">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Kunstig intelligens kan allerede skrive noter og mikse musikk. I tiden fremover vil vi se mange eksempler på hvordan maskinlæring tas i bruk i musikkutøving og -produksjon og til å skape nye lytteopplevelser. Men hva er egentlig musikalsk kunstig intelligens? Hva vil det si å trene en maskinlæringsmodell? Vil maskinene gjøre musikere og komponister overflødige? Denne forelesningen vil gi deg en del svar, men også flere spørsmål.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2094857" class="vrtx-external-publication">
        <div id="vrtx-publication-2094857">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2094857">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring music performance and perception through motion capture.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4051979">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This talk will present different approaches to capturing human bodily activity. Motion capture can be performed with sensor-based and camera-based systems, each of which has benefits and limitations. Sensor-based systems are flexible and scalable and can easily be used outside laboratory environments. They are good at tracking relative motion and rotation information but less suitable for tracking position. Camera-based systems come in many flavors and can be used with and without markers. They excel at tracking positions but are prone to reflections and environmental noise. As a consequence, camera-based motion capture systems are better suited for laboratory settings. I will discuss my twenty-year-long experience using different motion capture systems to study music-related body motion. This includes research on musicians, including rehearsal techniques and performance strategies. Such studies push the limits of the technology when it comes to precision and accuracy. It is particularly challenging when using motion capture equipment in real-world concert settings. At the University of Oslo, we have successfully captured the motion of both solo and ensemble performances and are currently trying to scale up to a full orchestra. We are also carrying out motion capture of perceivers, audience members in concerts, dancers, and other people moving to music. Through the Norwegian Championship of Standstill, we have delved into human micromotion, the tiniest actions we can perform and perceive. At this level, motion capture can detect physiological signals, such as breathing and heart rate. Data from such studies are interesting scientifically and have also been used in artistic practice. Finally, I will give examples of how real-time motion capture can be used in various creative applications, including &quot;inverse&quot; sonic interaction.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2195909" class="vrtx-external-publication">
        <div id="vrtx-publication-2195909">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2195909">
                Göksülük, Bilge Serdar
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From Bodily Interactions to Embodied Concepts.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4570175">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2090748" class="vrtx-external-publication">
        <div id="vrtx-publication-2090748">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2090748">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Responsible Research and Innovation in Sound and Music Computing.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5173416">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">My presentation will focus on how the ongoing shift to Open Research within the field of sound and music computing (SMC) promotes Responsible Research and Innovation (RRI).</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2075629" class="vrtx-external-publication">
        <div id="vrtx-publication-2075629">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2075629">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Data? Åpen forskningspraksis for ikke-datadrevne fagfelt.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4061986">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva er åpen forskning og hva skal til for å etablere en kultur for åpen forskning i humanistiske fag og samfunnsfag? Hvordan vi skal komme til en ny normal der det vi i dag omtaler som «åpen forskning» kun kalles forskning?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2061615" class="vrtx-external-publication">
        <div id="vrtx-publication-2061615">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2061615">
                Lesteberg, Mari &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MICRO and MACRO - Developing New Accessible Musicking Technologies.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4397733">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the development of two musical instrument prototypes developed to explore how non-haptic music technologies can be accessed from a web browser and how they can offer accessibility for people with low fine motor skills. Two approaches to browser-based motion capture were developed and tested during an iterative design process. This was followed by observational studies of two user groups: one with low fine motor skills and one with normal motor skills. Contrary to our expectations, we found that avoiding the use of buttons and mice did not make the apps more accessible for the participants with low fine motor skills. Furthermore, motion speed was considered more important for people with low motor skills than the size of the control action. The most important finding is that browser-based musical instruments using sensor-based and video-based motion tracking are not only feasible but allow for reaching much larger groups of people than previously possible. This may ultimately lead to both more personalized and accessible musical experiences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1999735" class="vrtx-external-publication">
        <div id="vrtx-publication-1999735">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1999735">
                Jensenius, Alexander Refsum &amp; Platou, Jeanette
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan kunstig intelligens være kreativ?
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P2 Arena.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4042876">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva er AI, eller kunstig intelligens, som vi kaller det på norsk. I Arena i dag ser vi på hvor kunstig intelligens blir brukt, og hva det funker i. Kan vi få en data til å skrive poesi, og hva med musikken og kunsten?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2090667" class="vrtx-external-publication">
        <div id="vrtx-publication-2090667">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2090667">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Erfaringer med å lage 3xMOOC.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4604036">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">I denne presentasjonen vil jeg presentere hvordan vi gjennom årene har utviklet tre komplette nettkurs ved Universitetet i Oslo: Music Moves (2016), Motion Capture (2022) og Pupillometry (2023). Fokuset vil ligge på muligheter og utfordringer i video i utdanningssammenheng.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2094129" class="vrtx-external-publication">
        <div id="vrtx-publication-2094129">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2094129">
                Remache-Vinueza, Byron &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Groundbreaking New Technology Allows People To Listen to Music Through Touch.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        SciTechDaily.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3428633">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">An audio-tactile algorithm created by University of Malaga scientists conveys melodic information through vibration.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2076779" class="vrtx-external-publication">
        <div id="vrtx-publication-2076779">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2076779">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstfag og åpen forskning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5101092">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvilke dilemmaer oppstår når forskningsdata og resultater skal deles og gjenbrukes? Og hvilke muligheter medfører mer åpenhet og økt deling av data for fag som eksempelvis musikk, visuell kunst, film, scenekunst og design?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2076796" class="vrtx-external-publication">
        <div id="vrtx-publication-2076796">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2076796">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO and Interdisciplinarity.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4940705">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In this presentation I will discuss how we have been developing an interdisciplinary research centre, in which researchers from the arts and humanities and the social and natural sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2094136" class="vrtx-external-publication">
        <div id="vrtx-publication-2094136">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2094136">
                Remache-Vinueza, Byron; Trujillo-León, Andrés; Clim, Maria-Alena; Sarmiento-Ortiz, Fabián; Topon-Visarrea, Liliana &amp; Jensenius, Alexander Refsum
                    <a href="javascript:void(0);" title="Hent alle deltakere" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2094136/contributors', 'vrtx-publication-contributors-2094136')">
                    [Vis alle&nbsp;7&nbsp;forfattere av denne artikkelen]</a>
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mapping Monophonic MIDI Tracks to Vibrotactile Stimuli Using Tactile Illusions.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4368703">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In this project, we propose an algorithm to convert musical features and structures extracted from monophonic MIDI files to tactile illusions. Mapping music to vibrotactile stimuli is a challenging process since the perceptible frequency range of the skin is lower than that of the auditory system, which may cause the loss of some musical features. Moreover, current proposed models do not warrant the correspondence between the emotional response to music and the vibrotactile version of it. We propose to use tactile illusions as an additional resource to convey more meaningful vibrotactile stimuli. Tactile illusions enable us to add dynamics to vibrotactile stimuli in the form of movement, changes of direction, and localization. The suggested algorithm converts monophonic MIDI files into arrangements of two tactile illusions: “phantom motion” and “funneling”. The validation of the rendered material consisted of presenting the audio rendered from MIDI files to participants and then adding the vibrotactile component to it. The arrangement of tactile illusions was also evaluated alone. Results suggest that the arrangement of tactile illusions evokes more positive emotions than negative ones. This arrangement was also perceived as more agreeable and stimulating than the original audio. Although musical features such as rhythm, tempo, and melody were mostly recognized in the arrangement of tactile illusions, it provoked a different emotional response from that of the original audio.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072111" class="vrtx-external-publication">
        <div id="vrtx-publication-2072111">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072111">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Open music research between art and science.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4837237">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Many music researchers are turning towards studying music performance and perception in real-world settings. Collecting data in a concert situation is non-trivial, and FAIRifying the data is even more challenging. In this talk, I will discuss some challenges with handling privacy and copyright matters in music research. I will also discuss some benefits of working towards more open music research. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072113" class="vrtx-external-publication">
        <div id="vrtx-publication-2072113">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072113">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Publish or Perish? Researcher assessment is about to change.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3455971">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In July 2022, the European Commission launched an Agreement On Reforming Research Assessment. After years of talking, there is significant momentum for changing how researchers are assessed. In this talk, I will present some work leading up to the new agreement and how Universities Norway took a lead when developing the Norwegian Career Assessment Matrix (NOR-CAM). The core idea is that academics need to get recognition for a broader range of activities. This is important for transitioning to more open research practices and diverse career paths within and outside academia.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2007982" class="vrtx-external-publication">
        <div id="vrtx-publication-2007982">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2007982">
                Outa, Amani al; Knøvelsrud, Helene; Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Winner of RRI-inspired transdisciplinary side quest call.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Centre for Digital Life Norway.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4689931">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Centre for Digital Life Norway (DLN) is excited to congratulate the team behind the project “The autophagic symphony – Unveiling the final rhythm” as winner of DLN’s RRI-inspired transdisciplinary side quest call.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072114" class="vrtx-external-publication">
        <div id="vrtx-publication-2072114">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072114">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Alternatives to journal-based metrics in research assessment.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3497408">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Science Europe invites institutional leaders, researchers at all stages of their careers, and experts from the field to join its 18 and 19 October 2022 conference on Open Science to discuss two key questions: (1) Is Open Science ready to become the norm in research? (2) How do we ensure this becomes an equitable transition? To find answers to these questions, the conference will provide a comprehensive overview of practical and policy initiatives, research assessment reforms, and financial measures that support the transition to Open Science. We will also look forward to new and emerging trends.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072116" class="vrtx-external-publication">
        <div id="vrtx-publication-2072116">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072116">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Experiencing the world through sound actions.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5184233">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This talk will reflect on my year-long project recording a daily &quot;sound action&quot;. These are multimodal entities consisting of body motion and its resultant sound. When we only see a sound action, we can imagine its sound. If we only hear a sound action, we can imagine the body motion and objects involved in the interaction. Sound actions are ubiquitous in everyday life yet rarely discussed and reflected upon. My attempts at analyzing sound actions show some of the complexity involved in making sense of actions, reactions, and interactions with the world. This complexity can also inspire creative usage. I will present examples of meaningless and cognitively conflicting sound actions in the talk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072020" class="vrtx-external-publication">
        <div id="vrtx-publication-2072020">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072020">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From ideas to reality: interdisciplinary collaborations.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4883666">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1954918" class="vrtx-external-publication">
        <div id="vrtx-publication-1954918">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1954918">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Open Research as Communication Strategy.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5124887">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1954357" class="vrtx-external-publication">
        <div id="vrtx-publication-1954357">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1954357">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reflections on the Development of the Musical Gestures Toolbox for Python.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4814723">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents the Musical Gestures Toolbox (MGT) for Python, a collection of modules targeted at researchers working with video recordings. The toolbox includes video visualization techniques such as creating motion videos, motion history images, and motiongrams. These visualizations allow for studying video recordings from different temporal and spatial perspectives. The toolbox also includes basic computer vision methods, and it is designed to integrate well with audio analysis toolboxes. The MGT was initially developed to analyze music-related body motion (of musicians, dancers, and perceivers) but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, pedagogy, psychology, and medicine.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/filter?fundingIdentifier=324003&amp;fundingSource=NFR">Se alle arbeider i NVA</a></p>
    </div>

      </div>
    </div>



	  
            
      
        <div class="financing">
          <h2>Finansiering</h2>

          
            <div class="financing-info-wrapper">
            



          
            
            <div class="financing-info">
            <img lang="no" src="/vrtx/dist/resources/uio2/css/images/partner-logos/funded-research-council-no.svg" class="logo-img" alt="Støttet av Forskningsrådet" loading="lazy"/>
            
            
              <p class="financing-info-project-number"><span>Prosjektnummer: </span><span>324003</span></p>
            
            </div>
          
          </div>
        
        
      
        </div>
      
            
      
            
      
        <div class="secondary-content"><h2>Vitenskapelig rådgivning</h2><div class="two-column-list"><ul><li><a href="https://lesc-cnrs.fr/fr/profil-utilisateur/cguillebaud" target="vrtx-preview-window">Christine Guillebaud (CNRS)</a> </li><li><a href="https://www.mcgill.ca/ahcs/people-contacts/faculty/sterne" target="vrtx-preview-window">Jonathan Sterne (McGill University)</a></li><li><a href="https://research.flw.ugent.be/en/marc.leman" target="vrtx-preview-window">Marc Leman (Ghent University)</a></li><li><a href="https://www.natashabarrett.org/" target="vrtx-preview-window">Natasha Barrett (Norwegian Academy of Music)</a> </li><li><a href="https://www.sheffield.ac.uk/music/people/academic-staff/nicola-dibben" target="vrtx-preview-window">Nicola Dibben (Sheffield University)</a> </li></ul></div><h2>Lokal ekspertgruppe</h2><div class="two-column-list"><ul><li><a href="/ritmo/english/people/management/anneda/index.html" target="vrtx-preview-window">Anne Danielsen</a></li><li><a href="https://www.sv.uio.no/psi/english/people/aca/brunol/index.html" target="vrtx-preview-window">Bruno Laeng</a></li><li><a href="https://www.hf.uio.no/imv/english/people/aca/tenured/jonnakv/index.html" target="vrtx-preview-window">Jonna Vuoskoski</a></li><li><a href="https://www.hf.uio.no/imv/english/people/aca/tenured/yngvark/index.html" target="vrtx-preview-window">Yngvar Kjus</a></li><li><a href="https://www.nina.no/english/Contact/Employees/Employee-info?AnsattID=16266" target="vrtx-preview-window">Rose Keller</a></li></ul></div><h2>Arrangementer</h2><p><a href="/ritmo/english/news-and-events/events/conferences/2024/ventilation/index.html">International Workshop on the Aesthetics of Ventilation Sound</a>, 2. desember 2024.</p><h2>Åpen forskning</h2><p>AMBIENT er et fyrtårnsprosjekt innen <a href="/ritmo/english/projects/ambient/open-research/" target="vrtx-preview-window">åpen forskning</a>. Målet er å åpne så 
mye som mulig av forskningsprosessen, fra søknad til data, kode og publikasjoner.</p><h2>#StillStanding</h2><p>Gjennom 2023 vil AMBIENT-prosjektleder Alexander Refsum Jensenius stå stille tre minutter hver dag i et nytt rom. Målet er å samle data av menneskelige mikrobevegelser til en bok som oppsummerer <a href="/ritmo/english/projects/completed-projects/micro/" target="vrtx-preview-window">MICRO-prosjektet</a>. På samme tid samles det inn data fra 365 ulike rom som vil brukes i AMBIENT-prosjektet.</p><ul><li><a href="https://mastodon.online/tags/StillStanding" target="vrtx-preview-window">Følg #StillStanding-prosjektet på Mastodon (mastodon.online)</a></li></ul></div>
      
            
      
        <a id="vrtx-change-language-link" href="/ritmo/english/projects/ambient/index.html">
          English<span class="offscreen-screenreader">
            version of this page
          </span>
        </a>
      
            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Publisert</span> <span class="published-date">30. jan. 2025 11:19 </span>
        
          - <span class="last-modified-date">Sist endret</span> <span class="last-modified-date">24. mars 2026 11:40</span>
        
        </div>
      
          </div>
        </div>
      
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo">RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Kontakt</h2>
   <p><a href="/ritmo/om/">Kontakt oss</a><br>
   <a href="/om/finn-fram/omrader/gaustad/ga09/">Finn frem</a></p>
</div>
<div>
   <h2>Om nettstedet</h2>
   <p><a href="/om/regelverk/personvern/personvernerklering-nett.html">Bruk av informasjonskapsler</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Tilgjengelighetserklæring</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Ansvarlig for denne siden</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredakt?r</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/prosjekter/ambient/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Logg inn
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/" title="G? til uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

<!--a4d1bc0e1742c08b--><script style="display: none;">
// NOTE(review): This snippet dynamically injects Baidu's "link submit" push.js
// (SEO auto-submission for the Baidu search engine). It appears to be INJECTED
// third-party content, not part of the original UiO/Vortex page: the comment
// markers wrapping it look machine-generated, the script tag is hidden via an
// inline style, and the same compromise pattern (Chinese gambling spam text)
// appears elsewhere in this document. TODO: confirm with the site owners and
// remove this block entirely if unauthorized.
(function(){
    // Create a <script> element pointing at the Baidu push endpoint.
    var bp = document.createElement('script');
    // Match the page's protocol; the http fallback loads an insecure script
    // (would be blocked as mixed content on https pages anyway).
    var curProtocol = window.location.protocol.split(':')[0];
    if (curProtocol === 'https'){
   bp.src = 'https://zz.bdstatic.com/linksubmit/push.js';
  }
  else{
  bp.src = 'http://push.zhanzhang.baidu.com/push.js';
  }
    // Insert before the first existing <script> so it loads without blocking parsing.
    var s = document.getElementsByTagName("script")[0];
    s.parentNode.insertBefore(bp, s);
})();
</script><!--/a4d1bc0e1742c08b--></body>
</html>
