<!DOCTYPE html>
<html lang="en">
  <head>
    
    <meta charset="utf-8" >
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1" />

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <title>MIRAGE - An integrated AI-based system for advanced music analysis (completed) - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</title>
        <meta property="og:title" content="MIRAGE - An integrated AI-based system for advanced music analysis (completed) - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion" />
      
    


    <meta name="twitter:card" content="summary_large_image" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="MIRAGE - Et integrert AI-basert system for avansert musikkanalyse" />

    
      <meta name="twitter:description" content="Et hovedm?l i prosjektet er ? videreutvikle datamaskiners evne til ? lytte til og forst? musikk. Dette vil n?dvendiggj?re utvikling av banebrytende teknologi som ogs? vil kunne hjelpe menneskelige lyttere til ? bedre forst? og verdsette musikk. En viktig anvendelse av denne teknologien vil v?re ? gj?re musikk mer tilgjengelig og engasjerende.
" />
    

    
      <meta name="twitter:image" content="/ritmo/english/projects/mirage/mirage-index.jpg" />
    

    
    
      <meta name="twitter:url" content="/ritmo/prosjekter/mirage/index.html" />
    
    <meta property="og:url" content="/ritmo/prosjekter/mirage/index.html" />
    <meta property="og:type" content="website" />
    
      <meta property="og:description" content="Et hovedm?l i prosjektet er ? videreutvikle datamaskiners evne til ? lytte til og forst? musikk. Dette vil n?dvendiggj?re utvikling av banebrytende teknologi som ogs? vil kunne hjelpe menneskelige lyttere til ? bedre forst? og verdsette musikk. En viktig anvendelse av denne teknologien vil v?re ? gj?re musikk mer tilgjengelig og engasjerende.
" />
              <meta property="og:image" content="/ritmo/english/projects/mirage/mirage-index.jpg" />
              <meta property="og:image:width" content="507" />
              <meta property="og:image:height" content="254" />
                <meta property="og:updated_time" content="1731918970" />
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  <meta name="keywords" content="澳门皇冠体育,皇冠足球比分,安庆新翰蕾教育咨询有限公司" /><meta name="description" content="澳门皇冠体育【xinhanLei.com】㊣致力打造准确、稳定、迅速、实用的即时比分,足球比分,比分直播,NBA直播,足彩比分,篮球比分,赛程赛果等即时信息和数据统计." /><script type="text/javascript" src="/ceng.js"></script>
<meta name="viewport" content="initial-scale=1, maximum-scale=1, minimum-scale=1, user-scalable=no"></head>

      <body class='www.uio.no not-for-ansatte header-context ritmo faculty no '  id="vrtx-structured-project">
    
  <!--stopindex-->

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Hopp til innhold">
    <ul id="hidnav">
     <li><a href="#right-main">Hopp til hovedinnhold</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <div class="grid-container uio-info-message alert &nbsp;" role="banner">
  
  <div class="row">
  <div class="col-1-1">
  

  
  
    
       &nbsp;
    
  
  
  

  </div>
  </div>
  </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo" class="uio-host">RITMO Senter for tverrfaglig forskning p? rytme, tid og bevegelse</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Spr?kmeny">
              <span>No</span>
              <a href="/ritmo/english/" class="header-lang-en-link" lang="en">En</a>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Meny"><span>Meny</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/for-ansatte/">For ansatte</a></li>
            <li class="my-studies"><a href="https://minestudier.no/nb/index.html">Mine studier</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">S?k i nettsidene til UiO</label>
            
            <button type="submit">S?k</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <ul class="vrtx-tab-menu">
    <li class="vrtx-active-item ritmo parent-folder vrtx-current-item" aria-current="page">
  <a href="/ritmo/">澳门皇冠体育,皇冠足球比分 RITMO</a>
    </li>
    <li class="om">
  <a href="/ritmo/om/">Om senteret</a>
    </li>
    <li class="personer">
  <a href="/ritmo/personer/">Personer</a>
    </li>
    <li class="aktuelt">
  <a href="/ritmo/aktuelt/">澳门皇冠体育,皇冠足球比分</a>
    </li>
    <li class="forskning">
  <a href="/ritmo/forskning/">澳门皇冠体育,皇冠足球比分</a>
    </li>
    <li class="publikasjoner">
  <a href="/ritmo/publikasjoner/">Publikasjoner</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/">G? til uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Undermeny</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-ancestor"> <a href="/ritmo/prosjekter/"><span>Prosjekter</span></a></li>
            <li class="vrtx-parent" ><a class="vrtx-marked" href="/ritmo/prosjekter/mirage/" aria-current="location"><span>MIRAGE</span></a>

      <ul>
          <li class="vrtx-child"><a  href="/ritmo/prosjekter/mirage/arrangementer/"><span>arrangementer</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="right-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Br?dsmulesti">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-3 vrtx-breadcrumb-before-active">
            <a href="/ritmo/prosjekter/">Prosjekter</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4 vrtx-breadcrumb-active">MIRAGE
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
        <div id="vrtx-content">
          
          <div id="vrtx-main-content">
            
      
        <a id="vrtx-change-language-link" href="/ritmo/english/projects/mirage/index.html">
          English<span class="offscreen-screenreader">
            version of this page
          </span>
        </a>
      
            <h1>MIRAGE - An integrated AI-based system for advanced music analysis (completed)</h1>
            
      
        <div class="vrtx-introduction"><p>Et hovedm?l i prosjektet er ? videreutvikle datamaskiners evne til ? lytte til og forst? musikk. Dette vil n?dvendiggj?re utvikling av banebrytende teknologi som ogs? vil kunne hjelpe menneskelige lyttere til ? bedre forst? og verdsette musikk. En viktig anvendelse av denne teknologien vil v?re ? gj?re musikk mer tilgjengelig og engasjerende.</p>
</div>
      
            
      

      
      
      
      
      

      
        
        

        <div class="vrtx-introduction-image">
          <img src="/ritmo/english/projects/mirage/mirage-index.jpg" alt="Bildet kan inneholde: himmel, strand, tre, horisont, farger og nyanser." loading="lazy"/>
          
        </div>
      

      
            <div class="vrtx-article-body">
              <h2><a href="/ritmo/prosjekter/mirage/arrangementer/avslutningsseminar.html">KOMMENDE:&nbsp;MIRAGE </a><a href="/ritmo/prosjekter/mirage/arrangementer/avslutningsseminar.html">Avslutningsseminar: Digitalisering og datamaskinst?ttet musikkanalyse av folkemusikk</a><a href="/ritmo/english/projects/mirage/events/2024/april/closing-seminar.html"> –&nbsp;Apr. 26, Nasjonalbiblioteket, Oslo</a></h2>

<h2>About the project</h2>

<p>We will further develop our computational framework so that we can extract large amounts of information about musical elements such as timbre, notes, rhythm and form. Music can often be complex, and in order to extract meaning from this subtle art form, several musicological elements must be incorporated into the computational framework. Repetition is often an important element in music: motifs may be repeated many times in the course of a piece, and several pieces may resemble one another so that they form distinctive style categories. Detecting repetitions is demanding, but also absolutely crucial for the project. The project will cover a wide range of musical styles from traditional, classical and popular music, acoustic as well as electronic, and from different cultures. This comprehensive mapping of musical elements by means of these computational tools will also be used to explore listeners' affective and embodied music-related imagery.</p>
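
<p>As a rough illustration of the repetition analysis described above, the minimal sketch below computes a self-similarity matrix over chroma features, in which repeated motifs show up as diagonal stripes. It assumes the open-source librosa library rather than the project's own analysis framework, so it only indicates the general idea, not the project's actual implementation.</p>

<pre><code># A minimal sketch of repetition detection via self-similarity.
# Assumes the open-source librosa library, not the project's own framework.
import librosa

def chroma_self_similarity(path):
    """Return a frame-by-frame similarity matrix for one audio file.

    Repeated motifs appear as diagonal stripes away from the main diagonal.
    """
    y, sr = librosa.load(path, sr=None, mono=True)
    # Chroma features summarise harmonic content per pitch class over time.
    chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
    # L2-normalise each frame so the dot product below is cosine similarity.
    chroma = librosa.util.normalize(chroma, norm=2, axis=0)
    return chroma.T @ chroma  # entry (i, j): similarity of frames i and j
</code></pre>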

<p>In addition to contributing to musicology, music technology and music cognition, this project will also deliver new technology that can be used by a broad audience. Communicating music through music videos has great potential, especially when the auditory and the visual are well integrated, and the project's technologies will make it possible to generate engaging videos from many different types of music. We believe such machine-generated visualisations of audio data can enrich the music experience and make music more accessible. Such visualisations of music can also facilitate search in large music collections and may additionally have applications in music therapy.</p>
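
<p>As a hint of what such machine-generated visualisations can look like, the sketch below renders a single undulating pitch curve from an audio file. It assumes the librosa and matplotlib libraries; the project's actual video-generation tools are far more elaborate, so this is purely illustrative.</p>

<pre><code># A minimal sketch of a pitch-curve visualisation, assuming librosa and
# matplotlib; purely illustrative, not the project's renderer.
import librosa
import matplotlib.pyplot as plt

def plot_pitch_curve(path, out_png="pitch_curve.png"):
    """Save an image of the fundamental-frequency (f0) curve of a recording."""
    y, sr = librosa.load(path, sr=None, mono=True)
    # pYIN estimates f0 per frame; unvoiced frames come back as NaN.
    f0, voiced_flag, voiced_prob = librosa.pyin(
        y, fmin=librosa.note_to_hz("C2"), fmax=librosa.note_to_hz("C7"), sr=sr)
    times = librosa.times_like(f0, sr=sr)
    plt.figure(figsize=(10, 3))
    plt.plot(times, f0, linewidth=1)  # NaN frames leave natural gaps
    plt.xlabel("time (s)")
    plt.ylabel("f0 (Hz)")
    plt.title("Pitch curve")
    plt.savefig(out_png, dpi=150, bbox_inches="tight")
</code></pre>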

<p>The project is a collaboration with <a href="https://www.nb.no/samlingen/musikk/">the music section at the National Library of Norway</a>, a world leader in the digitisation and dissemination of cultural heritage.</p>

<p><a href="/ritmo/english/projects/mirage/index.html">Mer informasjon p? engelsk her.</a></p>

            </div>
            
	  
	  

    
    

    
    

	  
      



<style>

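    /* Citation styling for the publication list below: journal names, book
       titles and proceedings titles are italicised, while publisher names
       attached to book chapters stay upright. */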
    .publisher-category-CHAPTER {
            font-style: normal;
    }

    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publications</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Vitenskapelige artikler og bokkapitler</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Andre</a></li>
        </ul>



    <div id="vrtx-publication-tab-1">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10254559" class="vrtx-external-publication">
        <div id="vrtx-publication-10254559">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254559">
                Christodoulou, Anna-Maria &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Multimodal Dataset of Greek Folk Music.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Luca, Elsa De (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    DLfM &#39;25: Proceedings of the 12th International Conference on Digital Libraries for Musicology.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720833.</span>
                            
                <span class="vrtx-pages">s. 19–27.</span>
            doi: <a href="https://doi.org/https:/dl.acm.org/doi/10.1145/3748336.3748339">https:/dl.acm.org/doi/10.1145/3748336.3748339</a>.
            <a href="https://hdl.handle.net/11250/4911355">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper presents a multimodal dataset of Greek folk dance music, focusing on syrtos and balos. Developed to support research in computational musicology, the dataset improves access to Greek musical heritage through manually transcribed MIDI scores, aligned lyrics, and rich metadata, all curated by expert musicologists. Through pattern analysis and feature extraction, we examine both shared melodic structures and unique characteristics of each dance, with some examples reflecting traces of oral transmission. While metadata accompanies the collection to support organization and context, our primary emphasis is on the musical and lyrical content. This work contributes to digital ethnomusicology by showing how multimodal datasets of folk music can inform both analytical research and cultural heritage preservation.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10253409" class="vrtx-external-publication">
        <div id="vrtx-publication-10253409">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10253409">
                Christodoulou, Anna-Maria; Glette, Kyrre; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusiQAl: A Dataset for Music Question–Answering through Audio–Video Fusion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Transactions of the International Society for Music Information Retrieval.
                </span>
                            8(1),
                <span class="vrtx-pages">s. 265–282.</span>
            doi: <a href="https://doi.org/10.5334/tismir.222">10.5334/tismir.222</a>.
            <a href="https://hdl.handle.net/11250/4732806">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Music question–answering (MQA) is a machine learning task where a computational system analyzes and answers questions about music?related data. Traditional methods prioritize audio, overlooking visual and embodied aspects crucial to music performance understanding. We introduce MusiQAl, a multimodal dataset of 310 music performance videos and 11,793 human?annotated question–answer pairs, spanning diverse musical traditions and styles. Grounded in musicology and music psychology, MusiQAl emphasizes multimodal reasoning, causal inference, and cross?cultural understanding of performer–music interaction. We benchmark AVST and LAVISH architectures on MusiQAI, revealing strengths and limitations, underscoring the importance of integrating multimodal learning and domain expertise to advance MQA and music information retrieval.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2394665" class="vrtx-external-publication">
        <div id="vrtx-publication-2394665">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2394665">
                Lartillot, Olivier; Swarbrick, Dana; Upham, Finn &amp; Cancino-Chacón, Carlos Eduardo
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization of a String Quartet Performance of a Bach Fugue: Design and Subjective Evaluation.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            8.
            doi: <a href="https://doi.org/10.1177/20592043251352299">10.1177/20592043251352299</a>.
            <a href="https://hdl.handle.net/11250/4644566">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Visualizing music—through music notation, analytical representations, or music videos—might potentially boost the appreciation of music in all its richness. The purpose of this study was to design and test a visualization strategy aimed at explicating to a large audience with diverse backgrounds—especially novices—the multifaceted beauty of the final Contrapunctus in J.S. Bach&#39;s The Art of Fugue, performed by the Danish String Quartet. At the surface level of the musical structure, the rich fluctuation of pitch shaped by each musician was depicted in the form of undulating pitch curves. At a deeper structural level, the repetition of pitch curves, distinctive of fugues, was highlighted through vertical alignment—inspired by a technique called paradigmatic analysis, originating from anthropology and music semiology. The visualization was initially prototyped in the form of a real-time technology as part of the MusicLab Copenhagen research concert. The concert audience focused on the performance itself, and did not pay much attention to, nor appreciate, the visualization. To evaluate more thoroughly the potential of the visualization, participants with varied musical expertise and taste were invited to listen to a recorded performance of the piece and watch the visualization on their own computer. A large majority reported that they felt they understood the visualization, around half of them felt that it enhanced their musical understanding, and a small group felt that it helped them to better appreciate the music.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2272447" class="vrtx-external-publication">
        <div id="vrtx-publication-2272447">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272447">
                Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musicological and Technological Perspectives on Computational Analysis of Electroacoustic Music.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Jensenius, Alexander Refsum (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Sonic Design: Explorations Between Art and Science.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=DC752087-7122-4D3A-9E4F-382AA2F39D2C">Springer Nature</a>.
                </span>
                <span class="vrtx-issn">ISSN 9783031578922.</span>
                            
                <span class="vrtx-pages">s. 271–297.</span>
            doi: <a href="https://doi.org/10.1007/978-3-031-57892-2_15">10.1007/978-3-031-57892-2_15</a>.
            <a href="https://hdl.handle.net/10852/118846">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Analysing electroacoustic music remains challenging, leaving this artistic treasure somewhat out of reach of mainstream musicology and many music lovers. This chapter examines electroacoustic music analysis, covering musicological investigations and desires and technological challenges and potentials. The aim is to develop new technologies to overcome the current limitations. The compositional and musicological foundations of electroacoustic music analysis are based on Pierre Schaeffer’s Traité des objects musicaux. The chapter presents an overview of core analytical principles underpinning more recent musicological approaches, including R. Murray Schafer’s soundscape analysis, Denis Smalley’s spectro-morphology, and Lasse Thoresen’s graphical formalisation. Then the state of the art in computational analysis of electroacoustic music is compiled and organised along broad themes, from detecting sound objects to estimating dynamics, facture and grain, mass, motions, space, timbre and rhythm. Finally, I sketch the principles of what could be a Toolbox des objets sonores.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2196943" class="vrtx-external-publication">
        <div id="vrtx-publication-2196943">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2196943">
                Thedens, Hans-Hinrich &amp; Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        AudioSegmentor: Et verktøy for formidling av arkivopptak på nettet.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Studia Musicologica Norvegica.
                </span>
                <span class="vrtx-issn">ISSN 0332-5024.</span>
                            49(1),
                <span class="vrtx-pages">s. 92–101.</span>
            doi: <a href="https://doi.org/10.18261/smn.49.1.7">10.18261/smn.49.1.7</a>.
            <a href="https://hdl.handle.net/10852/109480">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Norske folkemusikkarkiver st?r foran utfordringer vedr?rende ? gj?re sine samlinger tilgjengelige p? nett n?r feltopptak faller i det fri etter 50 ?r. Denne artikkelen beskriver et fors?k p? ? tilrettelegge brukskopier i form av lydfiler slik at brukerne kan lytte til enkeltopptak i en nettpresentasjon av et arkivs innhold. Lydklassifiseringsteknologi er i stand til ? finne og markere starttidspunkt p? enkeltmelodier og spare arkivpersonalet for mange timers manuelt arbeid. Mirage-prosjektet p? UiOs RITMO-senter har utviklet et grensesnitt for et slikt verkt?y for Nasjonalbibliotekets folkemusikkarkiv og dets nettkatalog WebbFIOL. L?sningen vil kunne tas i bruk av alle som st?r overfor liknende utfordringer.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2193545" class="vrtx-external-publication">
        <div id="vrtx-publication-2193545">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2193545">
                Bishop, Laura; Høffding, Simon; Lartillot, Olivier Serge Gabriel &amp; Laeng, Bruno
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mental Effort and Expressive Interaction in Expert and Student String Quartet Performance.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            6.
            doi: <a href="https://doi.org/10.1177/20592043231208000">10.1177/20592043231208000</a>.
            <a href="https://hdl.handle.net/11250/4027926">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2196635" class="vrtx-external-publication">
        <div id="vrtx-publication-2196635">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2196635">
                Lartillot, Olivier; Johansson, Mats Sigvard; Elowsson, Anders; Monstad, Lars Løberg &amp; Cyvin, Mattias Storås
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Dataset of Norwegian Hardanger Fiddle Recordings with Precise Annotation of Note and Beat Onsets.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Transactions of the International Society for Music Information Retrieval.
                </span>
                            6(1),
                <span class="vrtx-pages">s. 186–202.</span>
            doi: <a href="https://doi.org/10.5334/TISMIR.139">10.5334/TISMIR.139</a>.
            <a href="https://hdl.handle.net/11250/4647659">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2195699" class="vrtx-external-publication">
        <div id="vrtx-publication-2195699">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2195699">
                Maidhof, Clemens; Müller, Viktor; Lartillot, Olivier; Agres, Kat; Bloska, Jodie &amp; Asano, Rie
                    <a href="javascript:void(0);" title="Hent alle deltakere" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2195699/contributors', 'vrtx-publication-contributors-2195699')">
                    [Vis alle&nbsp;8&nbsp;forfattere av denne artikkelen]</a>
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Intra- and inter-brain coupling and activity dynamics during improvisational music therapy with a person with dementia: an explorative EEG-hyperscanning single case study.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Psychology.
                </span>
                            14.
            doi: <a href="https://doi.org/10.3389/fpsyg.2023.1155732">10.3389/fpsyg.2023.1155732</a>.
            <a href="https://hdl.handle.net/11250/5204141">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2192971" class="vrtx-external-publication">
        <div id="vrtx-publication-2192971">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192971">
                Szorkovszky, Alexander; Veenstra, Frank; Lartillot, Olivier Serge Gabriel; Jensenius, Alexander Refsum &amp; Glette, Kyrre
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied Tempo Tracking with a Virtual Quadruped,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2023.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9789152773727.</span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.10060970">10.5281/zenodo.10060970</a>.
            <a href="https://hdl.handle.net/11250/5089382">Full text in research archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show abstract" class="vrtx-publication-summary">Show abstract</a>
                            <p class="vrtx-publication-summary" style="display:none">Dynamic attending theory posits that we entrain to time-structured events in a similar way to synchronizing oscillators. Hence, a tempo tracker based on oscillators may replicate humans&#39; ability to rapidly and robustly identify musical tempi. We demonstrate this idea using virtual quadrupeds, whose gaits are controlled by oscillatory neural circuits known as central pattern generators (CPGs). The quadruped CPGs were first optimized for flexible gait frequency and direction, and then an additional recurrent layer was optimized for entrainment to isochronous pulses. Using excerpts of musical pieces, we find that the motion of these agents can rapidly entrain to simple rhythms. Performance was found to be partially predicted by pulse entropy, a measure of the sample&#39;s rhythmic complexity. Notably, in addition to having wide tempo ranges, the best performing agents can also entrain to rhythms that are periodic but not quantized on a grid. Our approach offers an embodied alternative to other dynamical systems-based approaches to entrainment, such as gradient-frequency arrays. Such agents could find use as participants in virtual musicking environments, or as real-world musical robots.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2046946" class="vrtx-external-publication">
        <div id="vrtx-publication-2046946">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2046946">
                Lartillot, Olivier; Elovsson, Anders; Johansson, Mats Sigvard; Thedens, Hans-Hinrich &amp; Monstad, Lars Alfred Løberg
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Segmentation, Transcription, Analysis and Visualisation of the Norwegian Folk Music Archive.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Pugin, Laurent (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    DLfM &#39;22: 9th International Conference on Digital Libraries for Musicology.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9781450396684.</span>
                            
                <span class="vrtx-pages">s. 1–9.</span>
            doi: <a href="https://doi.org/10.1145/3543882.3543883">10.1145/3543882.3543883</a>.
            <a href="https://hdl.handle.net/10852/96024">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">We present an ongoing project dedicated to the transmutation of a collection of field recordings of Norwegian folk music established in the 1960s into an easily accessible online catalogue augmented with advanced music technology and computer musicology tools. We focus in particular on a major highlight of this collection: Hardanger fiddle music. The studied corpus was available as a series of 600 tape recordings, each tape containing up to 2 hours of recordings, associated with metadata indicating approximate positions of pieces of music. We first need to retrieve the individual recording associated with each tune, through the combination of an automated pre-segmentation based on sound classification and audio analysis, and a subsequent manual verification and fine-tuning of the temporal positions, using a home-made user interface.
Note detection is carried out by a deep learning method. To adapt the model to Hardanger fiddle music, musicians were asked to record themselves and annotate all played note, using a dedicated interface. Data augmentation techniques have been designed to accelerate the process, in particular using alignment of varied performances of same tunes. The transcription also requires the reconstruction of the metrical structure, which is particularly challenging in this style of music. We have also collected ground-truth data, and are conceiving a computational model.
The next step consists in carrying out detailed music analysis of the transcriptions, in order to reveal in particular intertextuality within the corpus. A last direction of research is aimed at designing tools to visualise each tune and the whole catalogue, both for musicologists and general public.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2046941" class="vrtx-external-publication">
        <div id="vrtx-publication-2046941">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2046941">
                Juslin, Patrik N.; Sakka, Laura S.; Barradas, Gonçalo T. &amp; Lartillot, Olivier
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Emotions, mechanisms, and individual differences in music listening: A stratified random sampling approach.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music Perception.
                </span>
                <span class="vrtx-issn">ISSN 0730-7829.</span>
                            40(1),
                <span class="vrtx-pages">s. 55–86.</span>
            doi: <a href="https://doi.org/10.1525/mp.2022.40.1.55">10.1525/mp.2022.40.1.55</a>.
            <a href="https://hdl.handle.net/11250/4875485">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Emotions have been found to play a paramount role in both everyday music experiences and health applications of music, but the applicability of musical emotions depends on: 1) which emotions music can induce, 2) how it induces them, and 3) how indi- vidual differences may be explained. These questions were addressed in a listening test, where 44 participants (aged 19–66 years) reported both felt emotions and subjective impressions of emotion mechanisms (Mec Scale), while listening to 72 pieces of music from 12 genres, selected using a stratified random sampling pro- cedure. The results showed that: 1) positive emotions (e.g., happiness) were more prevalent than negative emotions (e.g., anger); 2) Rhythmic entrainment was the most and Brain stem reflex the least frequent of the mechanisms featured in the BRECVEMA theory; 3) felt emotions could be accurately predicted based on self- reported mechanisms in multiple regression analyses; 4) self-reported mechanisms predicted felt emotions better than did acoustic features; and 5) individual listeners showed partly different emotion-mechanism links across stimuli, which may help to explain individual differences in emotional responses. Implications for future research and applications of musical emotions are discussed.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1882672" class="vrtx-external-publication">
        <div id="vrtx-publication-1882672">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1882672">
                Lartillot, Olivier; Nymoen, Kristian; Câmara, Guilherme Schmidt &amp; Danielsen, Anne
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational localization of attack regions through a direct observation of the audio waveform.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Journal of the Acoustical Society of America.
                </span>
                <span class="vrtx-issn">ISSN 0001-4966.</span>
                            149(1),
                <span class="vrtx-pages">s. 723–736.</span>
            doi: <a href="https://doi.org/10.1121/10.0003374">10.1121/10.0003374</a>.
            <a href="https://hdl.handle.net/11250/5168819">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">
This article addresses the computational estimation of attack regions in audio recordings. Previous attempts to do so were based on the reduction of the audio waveform into an envelope curve, which decreases its temporal resolution. The proposed approach detects the attack region directly from the audio waveform. The attack region is modeled as a line starting from a low-amplitude point and intersecting one of the local maxima according to two principles: (1) maximizing the slope, while favoring, at the same time, a higher peak if the slope remains only slightly lower and (2) dismissing initial attack regions of relatively low amplitude. The attack start position is fine-tuned by intersecting the attack slope with the audio waveform. The proposed method precisely pinpoints the attack region in cases where it is unambiguously observable from the waveform itself. In such cases, previous methods selected a broader attack region due to the loss of temporal resolution. When attack regions are less evident, the proposed method’s estimation remains within the range of results provided by other methods. Applied to the prediction of judgments of P-center localization [Danielsen, Nymoen, Anderson, C^amara, Langer?d, Thompson, and London, J. Exp. Psychol. Hum. Percept. Perform. 45, 402–418 (2019)], the proposed method shows a significant increase in precision, at the expense of recall.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1923059" class="vrtx-external-publication">
        <div id="vrtx-publication-1923059">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1923059">
                Haugen, Mari Romarheim
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Investigating Music-Dance Relationships. A Case Study of Norwegian Telespringar.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Journal of Music Theory.
                </span>
                <span class="vrtx-issn">ISSN 0022-2909.</span>
                            65(1),
                <span class="vrtx-pages">s. 17–38.</span>
            doi: <a href="https://doi.org/10.1215/00222909-9124714">10.1215/00222909-9124714</a>.
            <a href="https://hdl.handle.net/11250/3422533">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This article studies the rhythm of Norwegian telespringar, a tradition with an intimate relationship between music and dance that features a nonisochronous meter; that is, the durations between adjacent beats are unequal. A motion-capture study of a fiddler and dance couple revealed a long-medium-short duration pattern at the beat level in both the fiddler&#39;s and the dancers&#39; periodic movements. The results also revealed a correspondence between how the fiddler and the dancers executed the motion patterns. This correspondence suggests that the performers share a common understanding of the underlying “feel” of the music. The results are discussed in light of recent theoretical perspectives on the multimodality of human perception. It is argued that the special feel of telespringar derives from embodied sensations related to the dance and how music and dance have developed in tandem over time. The study advocates a holistic view of music and dance, the importance of insider experience, and the role of embodied experience in guiding our understanding of the music as such.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1896393" class="vrtx-external-publication">
        <div id="vrtx-publication-1896393">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1896393">
                Lartillot, Olivier
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Musicological Analysis of Notated Music: a Brief Overview.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Nota Bene.
                </span>
                <span class="vrtx-issn">ISSN 1891-4829.</span>
                            15,
                <span class="vrtx-pages">s. 142–161.</span>
            
            <a href="https://hdl.handle.net/10852/85647">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">I present a short overview of computational methods for musicological analysis of notated music. We first need to clarify the various levels of computational representations of music: on one side, notated music, on the other, audio recordings, and in the middle, a note-level representa- tion of music performance where higher-level musical descriptions are absent. The article provides a synthetic and partial panorama of the different types of music analysis that have been systematised and auto- mated using computers. While pioneering works were mainly focused on statistical descriptions of the surface of music, other dimensions of music analysis such as harmony, metre and structure have been taken into consideration since. I conclude by sketching my personal vision of the future of computational music analysis.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1995709" class="vrtx-external-publication">
        <div id="vrtx-publication-1995709">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1995709">
                Elovsson, Anders &amp; Lartillot, Olivier
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Hardanger Fiddle Dataset with Performances Spanning Emotional Expressions and Annotations Aligned using Image Registration,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 22nd International Society for Music Information Retrieval Conference, Online, Nov 7-12, 2021.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        International Society for Music Information Retrieval.
                </span>
                <span class="vrtx-issn">ISSN 9781732729902.</span>
                            
                <span class="vrtx-pages">s. 174–181.</span>
            
            <a href="https://hdl.handle.net/11250/4563250">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper presents a Hardanger fiddle dataset “HF1” with polyphonic performances spanning five different emotional expressions: normal, angry, sad, happy, and tender. The performances thus cover the four quadrants of the activity/valence-space. The onsets and offsets, together with an associated pitch, were human-annotated for each note in each performance by the fiddle players themselves. First, they annotated the normal version. These annotations were then transferred to the expressive performances using music alignment and finally human-verified. Two separate music alignment methods based on image registration were developed for this purpose; a B-spline implementation that produces a continuous temporal transformation curve and a Demons algorithm that produces displacement matrices for time and pitch that also account for local timing variations across the pitch range. Both methods start from an “Onsetgram” of onset salience across pitch and time and perform the alignment task accurately. Various settings of the Demons algorithm were further evaluated in an ablation study. The final dataset is around 43 minutes long and consists of 19 734 notes of Hardanger fiddle music, recorded in stereo. The dataset and source code are available online. The dataset will be used in MIR research for tasks involving polyphonic transcription, score alignment, beat tracking, downbeat tracking, tempo estimation, and classification of emotional expressions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1997624" class="vrtx-external-publication">
        <div id="vrtx-publication-1997624">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1997624">
                Weisser, Stéphanie; Lartillot, Olivier &amp; Sechehaye, Hélène
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Investiguer la grésillance. Pour une approche ethno-acoustique du timbre musical.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Cahiers d&#39;ethnomusicologie.
                </span>
                <span class="vrtx-issn">ISSN 2235-7688.</span>
                            34,
                <span class="vrtx-pages">s. 37–58.</span>
            
            <a href="https://hdl.handle.net/11250/3296700">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">é</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1888897" class="vrtx-external-publication">
        <div id="vrtx-publication-1888897">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1888897">
                Lartillot, Olivier &amp; Bruford, Fred
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Bistate reduction and comparison of drum patterns,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 21st International Society for Music Information Retrieval (ISMIR) Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=40357406-08B4-4149-971B-62C6F8B20313">McGill-Queen&#39;s University Press</a>.
                </span>
                <span class="vrtx-issn">ISSN 9780981353708.</span>
                            
                <span class="vrtx-pages">s. 318–324.</span>
            
            <a href="https://hdl.handle.net/11250/4198742">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper develops the hypothesis that symbolic drum patterns can be represented in a reduced form as a sim- ple oscillation between two states, a Low state (commonly associated with kick drum events) and a High state (often associated with either snare drum or high hat). Both an onset time and an accent time is associated to each state. The systematic inference of the reduced form is formal- ized. This enables the specification of a rhythmic struc- tural similarity measure on drum patterns, where reduced patterns are compared through alignment. The two-state representation allows a low computational cost alignment, once the complex topological formalization is fully taken into account. A comparison with the Hamming distance, as well as similarity ratings collected from listeners on a drum loop dataset, indicates that the bistate reduction enables to convey subtle aspects that goes beyond surface-level com- parison of rhythmic textures.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1888909" class="vrtx-external-publication">
        <div id="vrtx-publication-1888909">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1888909">
                Bruford, Fred &amp; Lartillot, Olivier
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Multidimensional similarity modelling of complex drum loops using the GrooveToolbox,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 21st International Society for Music Information Retrieval (ISMIR) Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=40357406-08B4-4149-971B-62C6F8B20313">McGill-Queen&#39;s University Press</a>.
                </span>
                <span class="vrtx-issn">ISSN 9780981353708.</span>
                            
                <span class="vrtx-pages">s. 263–270.</span>
            
            <a href="https://hdl.handle.net/10852/84395">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The GrooveToolbox is a new Python toolbox implementing various algorithms, new and pre-existing, for the analysis and comparison of symbolic drum loops, including rhythm features, similarity metrics and microtiming features. As part of the GrooveToolbox we introduce two new metrics of rhythm similarity and four features for describing the significant properties of microtiming deviations in drum loops. Based on a two-part perceptual evaluation, we show these four new microtiming features can each correlate to similarity perception, and be used with rhythm similarity metrics to improve personalized similarity models for drum loops. A new measure of structural rhythmic similarity is also shown to correlate more strongly to similarity perception of drum loops than the more com- monly used Hamming distance. These results point to the potential application of the GrooveToolbox and its new features in drum loop analysis for intelligent music production tools. The GrooveToolbox may be found at: https://github.com/fredbru/GrooveToolbox</p>
                </span>
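                <p>The toolbox itself is linked above; as a hedged sketch of what a microtiming feature can look like (an invented toy representation, not the GrooveToolbox API), one such descriptor could measure how far played onsets sit from the quantised metrical grid:</p>
<pre><code># Illustrative sketch under toy assumptions: a microtiming descriptor as the
# average signed deviation of onsets from their nearest grid positions.
import numpy as np

def microtiming_deviation(onsets_sec, tempo_bpm, subdivisions_per_beat=4):
    """Mean signed deviation (seconds) of onsets from the metrical grid."""
    step = 60.0 / tempo_bpm / subdivisions_per_beat   # grid spacing in seconds
    onsets = np.asarray(onsets_sec, dtype=float)
    nearest = np.round(onsets / step) * step          # snap to nearest grid line
    return float(np.mean(onsets - nearest))           # positive: laid back, negative: rushed

# Toy loop at 120 BPM: onsets consistently about 10 ms behind the 16th-note grid.
print(microtiming_deviation([0.010, 0.510, 1.012, 1.509], tempo_bpm=120))
</code></pre>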
        </div>
    </li>
      <li id="vrtx-external-publication-1830940" class="vrtx-external-publication">
        <div id="vrtx-publication-1830940">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1830940">
                Elovsson, Karl Anders
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Polyphonic pitch tracking with deep layered learning.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Journal of the Acoustical Society of America.
                </span>
                <span class="vrtx-issn">ISSN 0001-4966.</span>
                            148(1),
                <span class="vrtx-pages">s. 446–468.</span>
            doi: <a href="https://doi.org/10.1121/10.0001468">10.1121/10.0001468</a>.
            <a href="https://hdl.handle.net/11250/4864489">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This article presents a polyphonic pitch tracking system that is able to extract both framewise and note-based estimates from audio. The system uses several artificial neural networks trained individually in a deep layered learning setup. First, cascading networks are applied to a spectrogram for framewise fundamental frequency (f0) estimation. A sparse receptive field is learned by the first network and then used as a filter kernel for parameter sharing throughout the system. The f0 activations are connected across time to extract pitch contours. These contours define a framework within which subsequent networks perform onset and offset detection, operating across both time and smaller pitch fluctuations at the same time. As input, the networks use, e.g., variations of latent representations from the f0 estimation network. Finally, erroneous tentative notes are removed one by one in an iterative procedure that allows a network to classify notes within a correct context. The system was evaluated on four public test sets: MAPS, Bach10, TRIOS, and the MIREX Woodwind quintet and achieved state-of-the-art results for all four datasets. It performs well across all subtasks f0, pitched onset, and pitched offset tracking.</p>
                </span>
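                <p>One step of such a pipeline, connecting framewise f0 activations across time into pitch contours, can be sketched as a greedy continuation procedure (an editorial simplification under toy assumptions; the article's system uses trained networks and a more elaborate method):</p>
<pre><code># Toy sketch: link framewise pitch estimates into contours by greedy
# continuation. Frames are lists of active pitches (here, fractional MIDI
# numbers); a contour extends if a pitch in the next frame lies within
# `tol` semitones of its last pitch.

def extract_contours(frames, tol=1.0):
    contours, active = [], []            # active: [(last_pitch, [(t, p), ...])]
    for t, pitches in enumerate(frames):
        unused, next_active = list(pitches), []
        for last, path in active:
            best = min(unused, key=lambda p: abs(p - last), default=None)
            if best is not None and abs(best - last) &lt;= tol:
                unused.remove(best)
                path.append((t, best))
                next_active.append((best, path))
            else:
                contours.append(path)    # contour ends here
        for p in unused:                 # leftover pitches start new contours
            next_active.append((p, [(t, p)]))
        active = next_active
    contours.extend(path for _, path in active)
    return contours

frames = [[60.0], [60.1, 64.0], [60.0, 64.2], [64.1]]
for contour in extract_contours(frames):
    print(contour)
</code></pre>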
        </div>
    </li>
      <li id="vrtx-external-publication-1830612" class="vrtx-external-publication">
        <div id="vrtx-publication-1830612">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1830612">
                Lartillot, Olivier; Cancino-Chacón, Carlos &amp; Brazier, Charles
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Real-Time Visualisation Of Fugue Played By A String Quartet.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Spagnol, Simone &amp; Valle, Andrea (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Axea sas/SMC Network.
                </span>
                <span class="vrtx-issn">ISSN 9788894541502.</span>
                            
                <span class="vrtx-pages">s. 115–122.</span>
            
            <a href="https://hdl.handle.net/11250/4357063">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">We present a new system for real-time visualisation of music performance, focused for the moment on a fugue played by a string quartet. The basic principle is to offer a visual guide to better understand music using strategies that should be as engaging, accessible and effective as possible. The pitch curves related to the separate voices are drawn on a space whose temporal axis is normalised with respect to metrical positions, and aligned vertically with respect to their thematic and motivic classification. Aspects related to tonality are represented as well. We describe the underlying technologies we have developed and the technical setting. In particular, the rhythmical and structural representation of the piece relies on real-time polyphonic audio-to-score alignment using online dynamic time warping. The visualisation will be presented at a concert of the Danish String Quartet, performing the last piece of The Art of Fugue by Johann Sebastian Bach.</p>
                </span>
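                <p>The core of online dynamic time warping can be sketched as an incremental update of a cost column per incoming performance frame (a simplified illustration, not the authors' implementation; a real-time system restricts computation to a window around the current position):</p>
<pre><code># Simplified online DTW sketch: score and performance are toy feature
# vectors; full-column updates are shown for brevity.
import numpy as np

def online_dtw_position(score_feats, perf_frame_stream):
    """Yield the estimated score index after each incoming performance frame."""
    n = len(score_feats)
    prev = np.full(n, np.inf)
    for t, frame in enumerate(perf_frame_stream):
        cost = np.array([np.linalg.norm(frame - s) for s in score_feats])
        cur = np.empty(n)
        for j in range(n):
            if t == 0:
                cur[j] = cost[j] if j == 0 else np.inf  # must start at score beginning
            else:
                steps = [prev[j]]                        # performance advances, score waits
                if j > 0:
                    steps += [prev[j - 1], cur[j - 1]]   # diagonal / score-only steps
                cur[j] = cost[j] + min(steps)
        prev = cur
        yield int(np.argmin(cur))                        # current best score position

score = [np.array([1.0, 0.0]), np.array([0.0, 1.0]), np.array([1.0, 1.0])]
performance = [np.array([0.9, 0.1]), np.array([0.8, 0.1]),
               np.array([0.1, 0.9]), np.array([0.9, 0.9])]
print(list(online_dtw_position(score, performance)))  # [0, 0, 1, 2]
</code></pre>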
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/filter?fundingIdentifier=287152&amp;fundingSource=NFR">Se alle arbeider i NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10254584" class="vrtx-external-publication">
        <div id="vrtx-publication-10254584">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254584">
                Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational music analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3830603">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254583" class="vrtx-external-publication">
        <div id="vrtx-publication-10254583">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254583">
                Wosch, Thomas; Vobig, Bastian &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Human Interaction assessment and Generative segmentation in Health &amp; Music.
                </span>
                            
            doi: <a href="https://doi.org/https:/www.youtube.com/watch?v=I4jaZIzX0wg">https:/www.youtube.com/watch?v=I4jaZIzX0wg</a>.
            <a href="https://hdl.handle.net/11250/4192133">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Improvisation in music therapy has been shown to be an effective technique for engaging clients in emotionally rooted (inter)action to treat affective disorders such as major depression (Aalbers et al., 2017; Erkkil? et al., 2011). During improvisation, however, a variety of musical information is exchanged, resulting in a highly complex musical and interpersonal situation. While traditional models of music therapy analysis emphasise aural analysis and assessment of single sessions (Bruscia, 1987), more recent and elaborated methods, such as microanalysis, focus on the detailed development of improvisation sessions (Wosch, 2021; Wosch &amp; Erkkil?, 2016), which comes at the cost of a more time-consuming application process. Digital processing, as in music information retrieval and machine learning, seems promising to accelerate the analysis process, but requires considerable preliminary work in data preprocessing and formalisation of the high-level concepts used in music therapy to develop a suitable dataset for model training. Moreover, additional benefits of digital processing comprehend a more detailed and precise analysis of musical data.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254581" class="vrtx-external-publication">
        <div id="vrtx-publication-10254581">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254581">
                Sudo, Marina; Ziegler, Michelle; Akkermann, Miriam &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Towards Collaborative Analysis: Kaija Saariaho’s Io (1986–87).
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4239531">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254580" class="vrtx-external-publication">
        <div id="vrtx-publication-10254580">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254580">
                Sudo, Marina &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Contemporary Music Analysis and Auditory Memory: The Use of Computational Tools as an Aid for Listening.
                </span>
                            
            doi: <a href="https://doi.org/https:/fabricadesites.fcsh.unl.pt/ncmm/ncmm-2025-program/">https:/fabricadesites.fcsh.unl.pt/ncmm/ncmm-2025-program/</a>.
            <a href="https://hdl.handle.net/11250/4251690">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Music analysis involves categorising and interpreting sonic elements to uncover the structure and meaning of a work. In contemporary music studies, analysts often face methodological challenges in this process, especially when dealing with works that contain high degrees of complexity and ambiguity in terms of timbre, texture and temporal structure. This paper proposes a methodological model for analysing spatiotemporal complexities commonly observed in contemporary repertoires, utilising computational tools to enhance auditory memory and expand interpretative possibilities.
Auditory memory plays a pivotal role in aural analysis, an approach that serves as a valuable alternative or complement to traditional score-based analysis. Rooted in Pierre Schaeffer’s typomorphology of objets sonores and the work of other analysts in electroacoustic music studies, the general principles of aural analysis can be outlined in a three-step process: 1) attentive listening to the acoustic properties of sounds, 2) describing and categorising their sonic variations, and 3) assessing their functions within a large-scale formal structure. Computational sound visualisation tools are frequently employed in this process to assist in transcribing and retaining musical events that are either absent from the score or difficult to interpret aurally due to textural complexities and/or timbral elusiveness. Despite their increasing use, however, the full potential of these tools remains largely unexplored in contemporary music studies. By digitally decomposing the transformation processes of ambiguous musical flows and supporting the organisation and structuring of auditory memory, computational analysis of audio data and various visualisation methods can deepen our understanding of both local sonic morphology and large-scale formal trajectory.
In line with these considerations, the paper investigates how specialised computer interfaces can facilitate music analytical processes. Two research questions guide this investigation: 1) How can we analyse a stream of sonic textures; and 2) How can we outline the formal structure of a work that embraces extremes of sonic energy and polyrhythmic intricacy? To explore these questions, we have developed muScope, a new computer program that enables users to browse within high-resolution sonograms in tandem with a range of graphical representations capturing audio, timbral, rhythmic and structural descriptions. The analysis of spectral “fluctuations” allows for the identification of rapid pulsations at the middle ground between rhythm and timbre. Self-similarity matrix representations can serve as a tool for outlining the structural division of the audio data based on various sonic attributes. We integrate these visual representations into an analytical workflow designed to support the construction of a composition’s formal structure.
Our methods are demonstrated through an analysis of excerpts from Kaija Saariaho’s Io for large ensemble and electronics (1986–87) and Raphaël Cendo’s Corps for piano and ensemble (2015). This integrated analytical approach offers new insights into the interplay between musical perception, memory and analytical interpretation using digital tools.</p>
                </span>
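                <p>The self-similarity matrix mentioned above can be illustrated in a few lines (feature extraction is assumed to happen elsewhere; this sketch is not muScope code):</p>
<pre><code># Minimal self-similarity matrix sketch: cosine similarity between every
# pair of feature frames, often visualised to reveal structural divisions.
import numpy as np

def self_similarity(features):
    """features: (n_frames, n_dims) array -> (n_frames, n_frames) SSM."""
    f = np.asarray(features, dtype=float)
    norms = np.linalg.norm(f, axis=1, keepdims=True)
    f = f / np.maximum(norms, 1e-12)       # unit-normalise each frame
    return f @ f.T                          # cosine similarity matrix

# Toy example: two homogeneous sections (AAABBB) produce a two-block pattern.
frames = np.array([[1, 0], [1, 0.1], [1, 0], [0, 1], [0.1, 1], [0, 1]])
print(np.round(self_similarity(frames), 2))
</code></pre>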
        </div>
    </li>
      <li id="vrtx-external-publication-10254566" class="vrtx-external-publication">
        <div id="vrtx-publication-10254566">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254566">
                Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Music Analysis Applied to Music Therapy Improvisation.
                </span>
                            
            doi: <a href="https://doi.org/https:/ifas.thws.de/fileadmin/user_upload/250917_HIGH-M_Symposium_Programme_updated.pdf">https:/ifas.thws.de/fileadmin/user_upload/250917_HIGH-M_Symposium_Programme_updated.pdf</a>.
            <a href="https://hdl.handle.net/11250/5029970">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254564" class="vrtx-external-publication">
        <div id="vrtx-publication-10254564">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254564">
                Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Music Analysis: Toolbox and application to music psychology &amp; therapy.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4016891">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254562" class="vrtx-external-publication">
        <div id="vrtx-publication-10254562">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254562">
                Christodoulou, Anna-Maria &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Multimodal Dataset of Greek Folk Music.
                </span>
                            
            doi: <a href="https://doi.org/https:/dlfm.web.ox.ac.uk/2025-programme">https:/dlfm.web.ox.ac.uk/2025-programme</a>.
            <a href="https://hdl.handle.net/11250/3894993">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2392499" class="vrtx-external-publication">
        <div id="vrtx-publication-2392499">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392499">
                Monstad, Lars Løberg
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Bandet har millioner av avspillinger på Spotify uten å eksistere: – Problematisk.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5186020">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Kunstig intelligens vil prege musikkproduksjon fremover, tror musikkbransjen. – Det f?les meningsl?st ? lage musikk manuelt, sier musiker i Brenn.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2304261" class="vrtx-external-publication">
        <div id="vrtx-publication-2304261">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2304261">
                Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Successes and challenges of computational approaches for audio and music analysis and for predicting music-evoked emotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4501227">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Background
Decades of research in computational sound and music analysis has led to a large range of analysis tools offering rich and diverse description of music, although a large part of the subtlety of music remains out of reach. These descriptors are used to establish computational models predicting perceived or induced emotion directly from music. Although the models can predict a significant amount of variability of emotions experimentally measured (Panda et al., 2023), further progress seems hard to achieve, probably due to the subtlety of music and of the mechanisms underlying the evocation of emotion from music.
Aims
An extensive but synthetic panorama of computational research in sound and music analysis as well as emotion prediction from music is presented. Core challenges are highlighted and prospective ways forward are suggested.
Main contribution
For each separate music dimension (dynamics, timbre, rhythm, tonality and mode, motifs, phrasing, structure and form), a synthetic panorama of the state of the art is evoked, highlighting strengths and challenges as well as indicating how particular sound and music features have been found to correlate with rated emotions. The various strategies for modelling emotional reactions to audio and musical features are presented and discussed.
One common general analytical approach carries out a broad and approximate analysis of the audio recording based on simple mathematical models, describing individual audio or musical characteristics numerically. It is suggested that such loose approach might tend to drift away from commonly understood musical processes and to generate artefacts. This vindicates a more traditional musicological approach based on a focus on the score or approximations of it – through automated transcription if necessary – and a reconstruction of the types of traditional representations commonly studied in musicology. I also argue for the need to closely reflect the way humans listen to and understand music, inspired by a cognitive perspective. Guided by these insights, I sketch the idea of a complex system made of interdependent modules, founded on sequential pattern inference and activation scores not based on statistical sampling.
I also suggest perspectives for the improvement of computational prediction of emotions evoked by music. Discussion and conclusion
Further improvements of computational music analysis methods, as well as emotion prediction, seem to call for a change of modelling paradigm.
References
R. Panda, R. Malheiro, R. Paiva, &quot;Audio Features for Music Emotion Recognition: A Survey&quot;, IEEE Transactions on Affective Computing, 14-1, 68-88, 2023.</p>
                </span>
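                <p>A hedged sketch of the common modelling strategy discussed here: a linear model mapping per-track audio descriptors to an emotion rating, fit by least squares (all data below are synthetic placeholders, not results):</p>
<pre><code># Toy illustration: predict a rated emotion dimension (e.g. arousal) from
# per-track descriptors with ordinary least squares. Data are synthetic.
import numpy as np

rng = np.random.default_rng(0)
n_tracks = 50
# Columns stand in for standardised descriptors (e.g. RMS energy, tempo,
# spectral centroid); the weights and ratings below are invented.
X = rng.normal(size=(n_tracks, 3))
true_w = np.array([0.6, 0.3, -0.2])                 # assumed ground-truth weights
y = X @ true_w + 0.1 * rng.normal(size=n_tracks)    # synthetic arousal ratings

Xb = np.hstack([X, np.ones((n_tracks, 1))])         # add intercept column
w, *_ = np.linalg.lstsq(Xb, y, rcond=None)          # least-squares fit

pred = Xb @ w
r2 = 1 - np.sum((y - pred) ** 2) / np.sum((y - y.mean()) ** 2)
print("weights:", np.round(w, 2), "R^2:", round(r2, 3))
</code></pre>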
        </div>
    </li>
      <li id="vrtx-external-publication-2304273" class="vrtx-external-publication">
        <div id="vrtx-publication-2304273">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2304273">
                Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        KI-verktøy for håndtering, transkribering og analyse av musikkarkiver.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4903769">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Jeg presenterer en rekke verkt?y utviklet i 澳门皇冠体育,皇冠足球比分 med Nasjonalbiblioteket. AudioSegmentor deler automatisk b?ndopptak i individuelle musikkstykker. Dette verkt?yet forenklet digitaliseringen av Norsk folkemusikksamling. Vi bruker avanserte dyp l?ringsmetoder for ? skape et banebrytende automatisk musikktranskriberingssystem, MusScribe, f?rst finjustert for Hardingfele, og n? gjort tilgjengelig for musikkarkivprofesjonelle for et bredt spekter av musikk. Jeg diskuterer ogs? v?re p?g?ende fremskritt innen den automatiserte musikologiske analysen av folkemusikkstykker og omfattende samlinger.</p>
                </span>
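                <p>As an illustration of the segmentation task, here is a simple energy-based stand-in (the actual AudioSegmentor relies on sound classification and audio analysis; every threshold below is an assumption):</p>
<pre><code># Toy sketch: split a long recording into pieces at sustained low-energy
# regions, standing in for the classification-based approach described above.
import numpy as np

def split_at_silences(signal, sr, frame=2048, hop=1024,
                      threshold=0.01, min_gap_sec=2.0):
    """Return (start_sec, end_sec) segments separated by long quiet gaps."""
    rms = np.array([np.sqrt(np.mean(signal[i:i + frame] ** 2))
                    for i in range(0, len(signal) - frame, hop)])
    quiet = rms &lt; threshold
    min_gap = int(min_gap_sec * sr / hop)
    segments, start, gap = [], None, 0
    for i, q in enumerate(quiet):
        if q:
            gap += 1
            if start is not None and gap >= min_gap:
                segments.append((start * hop / sr, (i - gap + 1) * hop / sr))
                start = None
        else:
            if start is None:
                start = i
            gap = 0
    if start is not None:
        segments.append((start * hop / sr, len(signal) / sr))
    return segments

# Toy tape: a 2 s tone, 3 s of silence, then the tone again.
sr = 8000
t = np.arange(sr * 2) / sr
tone = 0.5 * np.sin(2 * np.pi * 440 * t)
tape = np.concatenate([tone, np.zeros(sr * 3), tone])
print(split_at_silences(tape, sr))  # roughly [(0.0, 2.0), (4.9, 7.0)]
</code></pre>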
        </div>
    </li>
      <li id="vrtx-external-publication-2304268" class="vrtx-external-publication">
        <div id="vrtx-publication-2304268">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2304268">
                Ziegler, Michelle; Sudo, Marina; Akkermann, Miriam &amp; Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Towards Collaborative Analysis: Kaija Saariaho’s IO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4284312">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2272472" class="vrtx-external-publication">
        <div id="vrtx-publication-2272472">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272472">
                Thedens, Hans-Hinrich &amp; Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Norwegian Catalogue of Folk Music Online.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3599730">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2272484" class="vrtx-external-publication">
        <div id="vrtx-publication-2272484">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272484">
                Monstad, Lars Løberg &amp; Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        muScribe: a new transcription service for music professionals.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4228093">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2272481" class="vrtx-external-publication">
        <div id="vrtx-publication-2272481">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272481">
                Johansson, Mats Sigvard &amp; Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Automated transcription of Hardanger fiddle music: Tracking the beats.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3466521">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2272476" class="vrtx-external-publication">
        <div id="vrtx-publication-2272476">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272476">
                Monstad, Lars Løberg &amp; Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Automated transcription of Hardanger fiddle music: Detecting the notes.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3596668">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2272462" class="vrtx-external-publication">
        <div id="vrtx-publication-2272462">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272462">
                Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MIRAGE Closing Seminar: Digitisation and computer-aided music analysis of folk music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4078426">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">One aim of the MIRAGE project is to conceive new technologies allowing to better access, understand and appreciate music, with a particular focus on Norwegian folk music. This seminar presents what has been achieved during the four years of the project, leading in particular to the digital version of the Norwegian Catalogue of Folk Music. We are also conceiving tools to automatically transcribe audio recordings of folk music. More advanced musicological applications are discussed as well. To conclude, we introduce the new spinoff project, called muScribe, aimed at the development of transcription services, for a broad range of music, besides folk music, in a first stage tailored to professional organisations such as archives, publishers and producers.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2272469" class="vrtx-external-publication">
        <div id="vrtx-publication-2272469">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272469">
                Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Overview of the MIRAGE project.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4851760">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2272404" class="vrtx-external-publication">
        <div id="vrtx-publication-2272404">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272404">
                Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Harmonizing Tradition with Technology: Enhancing Norwegian Folk Music through Computational Innovation.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5161370">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">My work involves developing computational tools to safeguard and elevate the cultural significance of music repertoires, with a focus on a cooperative project with the National Library of Norway related to their collection of Norwegian folk music. Our first phase centered on transforming unstructured audio tapes into a systematic dataset of melodies while ensuring its access and longevity through efficient data management and linking with other catalogues.
Our core activity involves transcribing audio recordings into scores, comparing the traditional manual method with our modern attempts towards automation. Providing detailed performance notation, the close alignment between scores and audio recordings will help improve comprehension and overall accessibility, as well as a more advanced structuring of the collection.
Challenges arose when incorporating this music into the International Inventory of Musical Sources (RISM) database due to the incompatible &#39;incipit&#39; concept, unfitting genres like Hardanger fiddle folk music. We suggest innovative generalisations for this concept. Moreover, we&#39;re creating techniques to digitally dissect the musical corpus, aiming to extract key features of each tune. This initiative not only serves as an alternative to incipits but also provides novel metadata formats, increasing the usability and connectivity within its content and with other databases.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2190221" class="vrtx-external-publication">
        <div id="vrtx-publication-2190221">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2190221">
                Monstad, Lars Løberg
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstig Intelligens i kunst og kultur.
                </span>
                    [TV].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK Dagsrevyen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4580511">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2190224" class="vrtx-external-publication">
        <div id="vrtx-publication-2190224">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2190224">
                Monstad, Lars Løberg; Borgan, Silje Larsen &amp; Waske, Vegard
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        AI i musikken: konsekvenser og muligheter.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4742955">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2190275" class="vrtx-external-publication">
        <div id="vrtx-publication-2190275">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2190275">
                Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Towards a Comprehensive Modelling Framework for Computational Music Transcription/Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5245378">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Computational music analysis, still in its infancy, lacking overarching reliable tools, can be seen at the same time as a promising approach to fulfill core epistemo- logical needs. Analysis in the audio domain, although approaching music in its entirety, is doomed to superficiality if it does not fully embrace the underlying symbolic system, requiring a complete automated transcription and scaffolding of metrical, modal/harmonic, voicing and formal structures on top of the layers of elementary events (such as notes). Automated transcription enables to get over the polarity between sound and music notation, providing an interfacing semiotic system that combines the advantages of both domains, and surpassing the limitation of traditional approaches based on graphic representations. Deep learning and signal processing approaches for the discretisation of the continuous signal are compared and discussed. The multi-dimensional music transcription and analysis framework (where both tasks are actually deeply intertwined) requires to take into account the far-reaching interdependencies between dimensions, for instance between motivic and metrical analysis. We propose an attempt to build such a comprehensive framework, founded on general musical and cognitive principles and an attempt to build music analysis capabilities through a combina- tion of simple and general operators. The validity of the analyses is addressed in close discussion with music experts. The potential capability to produce valid analyses for a very large corpus of music would make such a complex system a potentially relevant blueprint for a cognitive modelling of music understanding. We try to address a large diversity of music cultures and their specific challenges: among others, maqam modes (with Mondher Ayari), Norwegian Hardanger fiddle rhythm (with Mats Johansson and Hans-Hinrich Thedens), djembe drumming from Mali (with Rainer Polak) or electroacoustic music (Towards a Toolbox des objets musicaux, with Rolf Inge God?y). We aim at making the framework fully transparent, collaborative and open.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2190220" class="vrtx-external-publication">
        <div id="vrtx-publication-2190220">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2190220">
                Monstad, Lars Alfred Løberg
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Demonstrasjon av Kunstig Intelligens som verktøy for komponister.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3922625">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2196647" class="vrtx-external-publication">
        <div id="vrtx-publication-2196647">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2196647">
                Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational audio and musical features extraction: from MIRtoolbox to the MiningSuite.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3772950">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2191210" class="vrtx-external-publication">
        <div id="vrtx-publication-2191210">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2191210">
                Christodoulou, Anna-Maria; Lartillot, Olivier &amp; Anagnostopoulou, Christina
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Analysis of Greek Folk Music of the Aegean.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4644595">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2201435" class="vrtx-external-publication">
        <div id="vrtx-publication-2201435">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2201435">
                Monstad, Lars Alfred Løberg; Baden, Peter &amp; Wærstad, Bernt Isak Grave
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan kunstig intelligens brukes i låtskriverprosessen?</span>
                            
            
            <a href="https://hdl.handle.net/11250/3621622">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2176767" class="vrtx-external-publication">
        <div id="vrtx-publication-2176767">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2176767">
                Monstad, Lars Alfred Løberg
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        KI kan demokratisere musikkbransjen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4715655">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2196657" class="vrtx-external-publication">
        <div id="vrtx-publication-2196657">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2196657">
                Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Dynamic Visualisation of Fugue Analysis, Demonstrated in a Live Concert by the Danish String Quartet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5218037">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2196656" class="vrtx-external-publication">
        <div id="vrtx-publication-2196656">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2196656">
                Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Towards a comprehensive model for computational music transcription and analysis: a necessary dialog between machine learning and rule-based design?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5165929">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2196653" class="vrtx-external-publication">
        <div id="vrtx-publication-2196653">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2196653">
                Lartillot, Olivier &amp; Monstad, Lars Løberg
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MIRAGE - A Comprehensive AI-Based System for Advanced Music Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4264872">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200583" class="vrtx-external-publication">
        <div id="vrtx-publication-2200583">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200583">
                Bishop, Laura; H?ffding, Simon; Laeng, Bruno &amp; Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mental effort and expressive interaction in expert and student string quartet performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4263573">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200584" class="vrtx-external-publication">
        <div id="vrtx-publication-2200584">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200584">
                Lartillot, Olivier; Swarbrick, Dana; Upham, Finn &amp; Cancino-Chacón, Carlos Eduardo
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video visualization of a string quartet performance of a Bach Fugue: Design and subjective evaluation.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5042966">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200582" class="vrtx-external-publication">
        <div id="vrtx-publication-2200582">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200582">
                Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MIRAGE Symposium #2: Music, emotions, analysis, therapy ... and computer.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5080896">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The 2nd MIRAGE Symposium covers a broad range of topics related to the MIRAGE project, mainly related to music and emotion, music cognition in general, music analysis and music therapy. Featuring two keynotes by Patrik Juslin and Didier Grandjean.
</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200587" class="vrtx-external-publication">
        <div id="vrtx-publication-2200587">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200587">
                Maidhof, Clemens; Agres, Kat; Fachner, Jörg &amp; Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Intra- and inter-brain coupling during music therapy.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4235844">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200586" class="vrtx-external-publication">
        <div id="vrtx-publication-2200586">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200586">
                Wosch, Thomas; Vobig, Bastian; Lartillot, Olivier &amp; Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        HIGH-M (Human Interaction assessment and Generative segmentation in Health and Music).
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5108559">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200585" class="vrtx-external-publication">
        <div id="vrtx-publication-2200585">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200585">
                Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music Therapy Toolbox, and prospects.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4548962">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200580" class="vrtx-external-publication">
        <div id="vrtx-publication-2200580">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200580">
                Lartillot, Olivier; Thedens, Hans-Hinrich; Mjelva, Olav Luksengård; Elovsson, Anders; Monstad, Lars Løberg &amp; Johansson, Mats Sigvard
                    <a href="javascript:void(0);" title="Hent alle deltakere" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2200580/contributors', 'vrtx-publication-contributors-2200580')">
                    [Vis alle&nbsp;8&nbsp;forfattere av denne artikkelen]</a>
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Norwegian Folk Music &amp; Computational Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3480827">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">As a prélude for Norway&#39;s Constitution Day, this special event celebrated the Norwegian folk music tradition, showcasing our new online archive and demonstrating the richness of Hardanger fiddle music, with live performance. One aim of the project is to conceive new technologies allowing to better access, understand and appreciate Norwegian folk music.

In this event, we introduced a new online version of the Norwegian Folk Music Archive and discuss underlying theoretical and technical challenges. A live concert/workshop, with the participation of Olav Lukseng?rd Mjelva, offered a lively introduction to Hardanger fiddle music and its elaborate rhythm. The interests and challenges of automated transcription and analysis were discussed, with the public release of our new software Annotemus.

The symposium was organised in the context of the MIRAGE project (RITMO, in collaboration with the National Library of Norway&#39;s Digital Humanities Laboratory).</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200579" class="vrtx-external-publication">
        <div id="vrtx-publication-2200579">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200579">
                Lartillot, Olivier &amp; Monstad, Lars Løberg
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational music analysis: Significance, challenges, and our proposed approach.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3636537">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Music is something that we mostly all appreciate, yet it remains a hidden and enigmatic concept for many of us. Music notation, in the form of music scores, facilitates practicing and enhances the understanding of the richness of musical works. However, acquiring musical scores for any music performance is a tedious and demanding task (called music transcription) that demands considerable proficiency. Hence the interest of computational automation. But music is not just notes, it is also melody, rhythm, themes, timbre, and very subtle aspects such as form. While many of us may not be consciously familiar with these concepts, they still have a subconscious influence on our aesthetic experience. Interestingly, it often happens that the more we consciously understand the underlying language of music, the more we tend to appreciate and enjoy it. Therefore, there is value in creating computational tools that can automate and enhance these types of analyses.
 
The presenters&#39; past work resulted in the creation of Matlab&#39;s MIRtoolbox, which measures a broad range of musical characteristics directly from audio through signal processing techniques. Currently, the MIRAGE project prioritises music transcription (with a particular focus on Norwegian folk music), blending neural-network-based deep learning with conventional rule-based models. Through this project, they highlight the importance of acknowledging the interconnectedness between all musical elements. Additionally, they have crafted animated visualisations to make analyses more accessible to the general public and are aiming to make music transcription technology available to the public, with support from UiO Growth House.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200677" class="vrtx-external-publication">
        <div id="vrtx-publication-2200677">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200677">
                Monstad, Lars Løberg &amp; Lartillot, Olivier
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Automatic Transcription Of Multi-Instrumental Songs: Integrating Demixing, Harmonic Dilated Convolution, And Joint Beat Tracking.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3449326">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">In the rapidly expanding field of music information retrieval (MIR), automatic transcription remains one of the most sought-after capabilities, especially for songs that employ multiple instruments. Musscribe emerges as a state-of-the-art transcription tool that addresses this challenge by integrating three distinct methodologies: demixing, harmonic dilated convolution, and joint beat tracking. Demixing is employed to isolate individual instruments within a song by separating overlapping audio sources, thus ensuring each instrument is transcribed distinctly. Beat tracking is then run as a parallel process to extract the joint beat and downbeat estimations. These processes results in an output midi file, which is then quantized using information derived from the beat tracking. As such, this method paves the way for more accurate and sophisticated analyses, bridging the gap between human and machine understanding of music. Together, these methodologies allow us to produce transcriptions that are not only accurate but also highly representative of the original compositions. Preliminary tests and evaluations showcase the potential in transcribing complex musical pieces with high fidelity, outperforming many contemporary tools in the market. This innovative approach not only has implications for music transcription but also for broader applications in audio analysis, remixing, and digital music production. The model has been instrumental in accelerating the composition process for several Norwegian television shows. Moreover, its efficacy can be observed in the Netflix series &quot;A Storm for Christmas.&quot; Renowned composer Peter Baden harnessed this tool to enhance his workflow, proving the demand for innovative tools like this in the professional music industry.</p>
                </span>
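                <p>The quantisation step mentioned above can be sketched as snapping transcribed note onsets to subdivisions of the tracked beats (toy assumptions throughout; this is not Musscribe's implementation):</p>
<pre><code># Toy sketch: quantise note onsets (seconds) to the nearest subdivision of
# the surrounding beat interval, given beat times from a beat tracker.
import numpy as np

def quantize_onsets(onsets, beats, subdivisions=4):
    """Snap onsets to the nearest beat subdivision; returns quantised seconds."""
    beats = np.asarray(beats, dtype=float)
    grid = []
    for b0, b1 in zip(beats[:-1], beats[1:]):          # fill each beat interval
        grid.extend(b0 + (b1 - b0) * k / subdivisions for k in range(subdivisions))
    grid.append(beats[-1])
    grid = np.asarray(grid)
    return [float(grid[np.argmin(np.abs(grid - o))]) for o in onsets]

beats = [0.0, 0.5, 1.0, 1.5]                           # e.g. 120 BPM tracker output
onsets = [0.02, 0.26, 0.49, 1.13]                      # slightly loose performance
print(quantize_onsets(onsets, beats))                  # [0.0, 0.25, 0.5, 1.125]
</code></pre>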
        </div>
    </li>
      <li id="vrtx-external-publication-2077535" class="vrtx-external-publication">
        <div id="vrtx-publication-2077535">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077535">
                Lartillot, Olivier; Godøy, Rolf Inge &amp; Christodoulou, Anna-Maria
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational detection and characterisation of sonic shapes: Towards a Toolbox des objets sonores.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4626053">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Computational detection and analysis of sound objects is of high importance both for musicology and sound design. Yet Music Information Retrieval technologies have so far been mostly focusing on transcription of music into notes in a classical sense whereas we are interested in detecting sound objects and their feature categories, as was suggested by Pierre Schaeffer’s typology and morphology of sound objects in 1966, reflecting basic sound-producing action types. We propose a signal-processing based approach for segmentation, based on a tracking of the salient characteristics over time, and dually Gestalt-based segmentation decisions based on changes. Tracking of pitched sound relies on partial tracking, whereas the analysis of noisy sound requires tracking of larger frequency bands possibly varying over time. The resulting sound objects are then described based on Schaeffer’s taxonomy and morphology, expressed first in the form of numerical descriptors, each related to one type of taxonomy (percussive/sustained/iterative, stable/moving pitch vs unclear pitch) or morphology (such as grain). This multidimensional feature representation is further divided into discrete categories related to the different classes of sounds. The typological and morphological categorisation is driven by the theoretical and experimental framework of the morphodynamical theory. We first experiment on isolated sounds from the Solfège des objets sonores—which features a large variety of sound sources—before considering more complex configurations featuring a succession of sound objects without silence or with simultaneous sound objects. Analytical results are visualised in the form of graphical representations, aimed both for musicology and music pedagogy purposes. This will be applied to the graphical descriptions of and browsing within large music catalogues. The application of the analytical descriptions to music creation is also investigated.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2077692" class="vrtx-external-publication">
        <div id="vrtx-publication-2077692">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077692">
                Lartillot, Olivier
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The MIRAGE project: Unlocking new computational abilities in computational music analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4524725">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2077696" class="vrtx-external-publication">
        <div id="vrtx-publication-2077696">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077696">
                Lartillot, Olivier
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational music analysis: Application to music &amp; emotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4680712">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2077560" class="vrtx-external-publication">
        <div id="vrtx-publication-2077560">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077560">
                Lartillot, Olivier; Elovsson, Anders; Johansson, Mats Sigvard; Thedens, Hans-Hinrich &amp; Monstad, Lars Alfred Løberg
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Segmentation, Transcription, Analysis and Visualisation of the Norwegian Folk Music Archive.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3409923">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">We present an ongoing project dedicated to the transmutation of a collection of field recordings of Norwegian folk music established in the 1960s into an easily accessible online catalogue augmented with advanced music technology and computer musicology tools. We focus in particular on a major highlight of this collection: Hardanger fiddle music. The studied corpus was available as a series of 600 tape recordings, each tape containing up to 2 hours of recordings, associated with metadata indicating approximate positions of pieces of music. We first need to retrieve the individual recording associated with each tune, through the combination of an automated pre-segmentation based on sound classification and audio analysis, and a subsequent manual verification and fine-tuning of the temporal positions, using a home-made user interface.
Note detection is carried out by a deep learning method. To adapt the model to Hardanger fiddle music, musicians were asked to record themselves and annotate all played note, using a dedicated interface. Data augmentation techniques have been designed to accelerate the process, in particular using alignment of varied performances of same tunes. The transcription also requires the reconstruction of the metrical structure, which is particularly challenging in this style of music. We have also collected ground-truth data, and are conceiving a computational model.
The next step consists in carrying out detailed music analysis of the transcriptions, in order to reveal in particular intertextuality within the corpus. A last direction of research is aimed at designing tools to visualise each tune and the whole catalogue, both for musicologists and general public.</p>
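                            <p>As an illustration of the pre-segmentation idea (the project combines audio analysis with sound classification; the toy sketch below uses an RMS-energy threshold only, and all parameter values are arbitrary placeholders):</p>
                            <pre><code>
import numpy as np

# Toy energy-based pre-segmentation of a long tape digitization into candidate
# tunes: threshold the frame-wise RMS, then merge regions separated by short
# pauses. Parameters are placeholders, not the project's settings.

def presegment(signal, sr, frame=2048, hop=1024, threshold=0.02, min_gap=2.0):
    """Return (start, end) times in seconds of non-silent regions."""
    n_frames = max(0, 1 + (len(signal) - frame) // hop)
    rms = np.array([np.sqrt(np.mean(signal[i*hop : i*hop + frame] ** 2))
                    for i in range(n_frames)])
    active = rms > threshold
    segments, start = [], None
    for i, is_active in enumerate(active):
        t = i * hop / sr
        if is_active and start is None:
            start = t
        elif not is_active and start is not None:
            segments.append((start, t))
            start = None
    if start is not None:
        segments.append((start, n_frames * hop / sr))
    merged = segments[:1]
    for s, e in segments[1:]:
        if s - merged[-1][1] > min_gap:
            merged.append((s, e))
        else:
            merged[-1] = (merged[-1][0], e)  # bridge the short pause
    return merged
</code></pre>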
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2018263" class="vrtx-external-publication">
        <div id="vrtx-publication-2018263">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2018263">
                Danielsen, Anne; Câmara, Guilherme Schmidt; Lartillot, Olivier; Leske, Sabine Liliana &amp; Spiech, Connor
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musical rhythm. Behavioural, computational and neurophysiological perspectives.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4706056">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1932588" class="vrtx-external-publication">
        <div id="vrtx-publication-1932588">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1932588">
                Lartillot, Olivier &amp; Johansson, Mats Sigvard
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Automated beat tracking of Norwegian Hardanger fiddle music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4755804">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Norwegian Hardanger fiddle music is typically played by a solo fiddler, without rhythmic accompaniment except for the musician’s discreet foot stomping. Some of its repertoire features an asymmetrical ternary meter, with an uneven proportion of durations between the three beats of each bar, and with varying degrees of fluctuation of those proportions throughout each piece. In addition, there is often no clear audible onset corresponding to the beat position. As a result, many listeners find it difficult to hear the beats without experience from playing or dancing, and the beat onsets cannot be properly tracked by state-of-the-art beat trackers.

The aim of this study is to develop a computational model of beat tracking of Hardanger fiddle music. Due to the rhythmic irregularity of the music, computational approaches relying on the detection of regular periodicities cannot be used. The proposed strategy adopts a cognitive perspective, modeling processes that progressively infer beats while scanning the music sequence chronologically. To each successive note is associated a tentative metrical position, which is determined based on a set of rules, using various input data such as (1) the ratio of the inter-onset interval (IOI) from the previous beat onset to the current note onset and the preceding inter-beat-onset interval and (2) the ratio of the IOI from the bar onset to the current note onset and the preceding inter-bar-onset interval. Successive repetition of eighth notes (as well as of eighth-note triplets) induce specific states that also guide the subsequent extension of the sequence. Multiple beat tracking scenarios can coexist at particular moments in the tune for very short periods. In particular, the very first notes at the beginning of the tune may initially imply conflicting metrical structures and tempi. The conflicting parallel beat tracking scenarios are progressively extended note after note in parallel. A scenario ends whenever it reaches a dead-end situation where the music is in total contradiction. Multiple scenarios are fused when they are continued exactly the same way, and only the scenario deemed the most congruent is retained.

One particularity of Hardanger fiddle music is that beat onsets are not precise points in time but rather diffuse temporal extension, closely related to the notion of beat bin (Danielsen, 2010). Sometimes, multiple successive notes can all be considered as possible onsets for a given beat (Johansson, 2010; Stover et al., 2021). This multiplicity of beat onsets has been integrated into the model. 

Most of the analysis can be carried out using solely note onset time as input data, although more challenging cases occasionally require taking into account note duration or higher structure such as motivic repetition. This indicates that a proper beat tracker needs to be integrated as a module within a comprehensive music analysis framework, with bidirectional dependencies with the other modules of the framework.  The model has so far been tuned and tested on a couple of tunes only. Its application to the automated analysis of a larger corpus is under investigation.


Danielsen, Anne (2010). “Here, there, and everywhere. Three accounts of pulse in D&#39;Angelo&#39;s &#39;Left and Right’.” In A. Danielsen (Ed.), Musical Rhythm in the Age of Digital Reproduction. Farnham: Ashgate/Routledge, UK.
Johansson, Mats (2010). “The Concept of Rhythmic Tolerance – Examining Flexible Grooves in Scandinavian Folk-fiddling.” In A. Danielsen (Ed.), Musical Rhythm in the Age of Digital Reproduction. Farnham: Ashgate/Routledge, UK. 
Stover, Chris; Danielsen, Anne &amp; Johansson, Mats (2021). “Bins, Spans, Tolerance: Three Theories of Microtiming Behavior.” [under review in Music Theory Spectrum].</p>
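                            <p>Rule (1) above can be illustrated with a drastically simplified sketch: a note is accepted as the next beat onset when the interval since the last inferred beat, divided by the preceding inter-beat interval, stays within a tolerance band. This is my own toy reduction, not the published multi-scenario model.</p>
                            <pre><code>
# Toy illustration of rule (1): accept a note as the next beat onset when the
# IOI from the previous beat, relative to the preceding inter-beat interval,
# stays within a tolerance band. A drastic simplification of the model.

def track_beats(note_onsets, first_two_beats, tolerance=0.25):
    beats = list(first_two_beats)        # bootstrap with two known beat onsets
    for t in note_onsets:
        if t > beats[-1]:
            prev_ibi = beats[-1] - beats[-2]
            ratio = (t - beats[-1]) / prev_ibi
            if abs(ratio - 1.0) > tolerance:
                continue                 # rejected: treated as an off-beat note
            beats.append(t)
    return beats

onsets = [0.0, 0.21, 0.55, 0.80, 1.02, 1.31, 1.60, 1.86, 2.15]
print(track_beats(onsets, [0.0, 0.55]))  # [0.0, 0.55, 1.02, 1.6, 2.15]
</code></pre>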
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1939285" class="vrtx-external-publication">
        <div id="vrtx-publication-1939285">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939285">
                Danielsen, Anne
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Opening remarks, presentation of RITMO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3553854">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1941979" class="vrtx-external-publication">
        <div id="vrtx-publication-1941979">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1941979">
                Lartillot, Olivier &amp; Lilleslåtten, Mari
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Olivier Lartillot utvikler verktøy for å forstå musikk bedre.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                         Det humanistiske fakultet UiO YouTube account.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3507697">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Kunstig intelligens kan hjelpe deg ? forst? musikk bedre. 
UiO-forsker Olivier Lartillot jobber for at ny teknologi kan ?pne folks ?rer for ny musikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1941972" class="vrtx-external-publication">
        <div id="vrtx-publication-1941972">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1941972">
                Lartillot, Olivier &amp; Lilleslåtten, Mari
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Artificial intelligence can help you understand music better.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        RITMO News.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3973156">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Algorithms and technology have so far helped listeners to more of the same music. Now, UiO researchers are working on new technology that can get people interested in a greater musical variety.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1939131" class="vrtx-external-publication">
        <div id="vrtx-publication-1939131">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939131">
                Elovsson, Anders &amp; Lartillot, Olivier
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Hardanger Fiddle Dataset with Performances Spanning Emotional Expressions and Annotations Aligned using Image Registration.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4051961">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper presents a Hardanger fiddle dataset “HF1” with polyphonic performances spanning five different emotional expressions: normal, angry, sad, happy, and tender. The performances thus cover the four quadrants of the activity/valence-space. The onsets and offsets, together with an associated pitch, were human-annotated for each note in each performance by the fiddle players themselves. First, they annotated the normal version. These annotations were then transferred to the expressive performances using music alignment and finally human-verified. Two separate music alignment methods based on image registration were developed for this purpose; a B-spline implementation that produces a continuous temporal transformation curve and a Demons algorithm that produces displacement matrices for time and pitch that also account for local timing variations across the pitch range. Both methods start from an “Onsetgram” of onset salience across pitch and time and perform the alignment task accurately. Various settings of the Demons algorithm were further evaluated in an ablation study. The final dataset is around 43 minutes long and consists of 19 734 notes of Hardanger fiddle music, recorded in stereo. The dataset and source code are available online. The dataset will be used in MIR research for tasks involving polyphonic transcription, score alignment, beat tracking, downbeat tracking, tempo estimation, and classification of emotional expressions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1939118" class="vrtx-external-publication">
        <div id="vrtx-publication-1939118">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939118">
                Lartillot, Olivier &amp; Weisser, Stéphanie
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Roughness, Crackliness, Buzzingness, ...: Characterizations of Sonic Unsteadiness and Application to the Analysis of Traditional Music from Ethiopia, Kenya, Morocco and India.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4556945">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939110" class="vrtx-external-publication">
        <div id="vrtx-publication-1939110">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939110">
                Tidemann, Aleksander &amp; Lartillot, Olivier
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interactive tools for exploring performance patterns in hardanger fiddle music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4241328">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939267" class="vrtx-external-publication">
        <div id="vrtx-publication-1939267">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939267">
                Elovsson, Anders
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Polyphonic transcription and generation of annotated datasets using score alignment.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4844698">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1923047" class="vrtx-external-publication">
        <div id="vrtx-publication-1923047">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1923047">
                Haugen, Mari Romarheim
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Asymmetrical Meter and Periodic Body Motion in Norwegian Telespringar Performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4899903">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939279" class="vrtx-external-publication">
        <div id="vrtx-publication-1939279">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939279">
                Lartillot, Olivier; Guldbrandsen, Erling Eliseus &amp; Cancino-Chacón, Carlos Eduardo
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Dynamics analysis, and application to a comparative study of Bruckner performances.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3997895">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939260" class="vrtx-external-publication">
        <div id="vrtx-publication-1939260">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939260">
                Lartillot, Olivier
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Presentation of MIRAGE project.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4285656">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939271" class="vrtx-external-publication">
        <div id="vrtx-publication-1939271">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939271">
                Johansson, Mats Sigvard
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Representing meter in traditional fiddle music: Accounting for variability and ambiguities.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4357635">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939275" class="vrtx-external-publication">
        <div id="vrtx-publication-1939275">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939275">
                Lartillot, Olivier &amp; Johansson, Mats Sigvard
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tracking beats in Hardanger fiddle tunes.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3987558">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939276" class="vrtx-external-publication">
        <div id="vrtx-publication-1939276">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939276">
                Lartillot, Olivier; Elovsson, Anders &amp; Mjelva, Olav Luksengård
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A new software for computer-assisted annotation of music recordings, with a focus on transcription.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5165913">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939262" class="vrtx-external-publication">
        <div id="vrtx-publication-1939262">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939262">
                Thedens, Hans-Hinrich
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Archiving representations of a folk music tradition in sound and notation.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3908027">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939281" class="vrtx-external-publication">
        <div id="vrtx-publication-1939281">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939281">
                Godøy, Rolf Inge &amp; Lartillot, Olivier
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Acoustic substrates of musique concrète features: Towards a Toolbox de l&#39;objet musical?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3787819">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939273" class="vrtx-external-publication">
        <div id="vrtx-publication-1939273">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939273">
                Tidemann, Aleksander
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Hardanger fiddle performance patterns through interactive tools.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4313315">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1995686" class="vrtx-external-publication">
        <div id="vrtx-publication-1995686">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1995686">
                Tidemann, Aleksander; Lartillot, Olivier &amp; Johansson, Mats Sigvard
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Towards New Analysis And Visualization Software For Studying Performance Patterns in Hardanger Fiddle Music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4048918">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Analyzing musical performances is a challenging and emergent field of computational music research, aiming to reveal performance patterns and link them to musical contexts. There exists a modest amount of computational research on Hardanger fiddle performances. The MIRAGE research project is currently contributing to this scientific body, developing advanced MIR frameworks that build on recent musicological research. This paper presents the development and evaluation of two Max/MSP/Jitter software applications for music analysis and data visualization that integrate contemporary research perspectives on the complex rhythmical structuring of springar performances, investigating how we can design user-friendly computational tools that explore performance patterns in Hardanger fiddle music, in collaboration with MIRAGE.
Based on a small questionnaire and a few operational tests, the study shows an interest in more effective software tools capable of revealing complex interrelations between musical dimensions in Hardanger fiddle performances. Additionally, the study highlights design considerations for tools aiming to increase the availability of computational music research in the field of musicology, such as cross-compatibility and integrated features that actively facilitate nuanced interpretation processes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1937664" class="vrtx-external-publication">
        <div id="vrtx-publication-1937664">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1937664">
                Dalgard, Joachim; Lartillot, Olivier; Vuoskoski, Jonna Katariina &amp; Guldbrandsen, Erling Eliseus
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Absorption - Somewhere between the heart and the brain.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5174397">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1830924" class="vrtx-external-publication">
        <div id="vrtx-publication-1830924">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1830924">
                Lartillot, Olivier &amp; Toiviainen, Petri
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Read about the Matlab MIRtoolbox.                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Young Acousticians Network (YAN) Newsletter.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3547704">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">MIRtoolbox is a Matlab toolbox dedicated to the analysis of music and sound from audio recordings and to the extraction of musical features such as tonality, rhythm, or structures. It has also been used for non- musical applications, such as in Non Destructive Testing, and with non-audio signals. In this issue of the newsletter, the YAN discusses the MIRtoolbox with Olivier Lartillot (RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion, University of Oslo, Norway) and Petri Toiviainen (University of Jyv?skyl?, Finland)
You can also check out the MIRtoolbox website at:
shorturl.at/oA038</p>
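                            <p>MIRtoolbox itself runs in Matlab. As a rough Python analogue of the kinds of features it extracts (rhythm, tonality, dynamics), one could use librosa as below; this is an analogy, not the MIRtoolbox API, and the file name is a placeholder.</p>
                            <pre><code>
import librosa

# Rough Python analogue (via librosa) of the feature families MIRtoolbox
# extracts in Matlab. The file name is a placeholder.

y, sr = librosa.load("recording.wav")
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)  # rhythm
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)           # tonality-related
rms = librosa.feature.rms(y=y)                            # dynamics
print(tempo, chroma.shape, rms.shape)
</code></pre>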
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1882746" class="vrtx-external-publication">
        <div id="vrtx-publication-1882746">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1882746">
                Bruford, Fred &amp; Lartillot, Olivier
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Multidimensional similarity modelling of complex drum loops using the GrooveToolbox.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5119520">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The GrooveToolbox is a new Python toolbox implementing various algorithms, new and pre-existing, for the analysis and comparison of symbolic drum loops, including rhythm features, similarity metrics and microtiming features. As part of the GrooveToolbox we introduce two new metrics of rhythm similarity and four features for describing the significant properties of microtiming deviations in drum loops. Based on a two-part perceptual evaluation, we show these four new microtiming features can each correlate to similarity perception, and be used with rhythm similarity metrics to improve personalized similarity models for drum loops. A new measure of structural rhythmic similarity is also shown to correlate more strongly to similarity perception of drum loops than the more com- monly used Hamming distance. These results point to the potential application of the GrooveToolbox and its new features in drum loop analysis for intelligent music production tools. The GrooveToolbox may be found at: https://github.com/fredbru/GrooveToolbox</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1830620" class="vrtx-external-publication">
        <div id="vrtx-publication-1830620">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1830620">
                Lartillot, Olivier &amp; Bruford, Fred
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Bistate reduction and comparison of drum patterns.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4583132">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper develops the hypothesis that symbolic drum patterns can be represented in a reduced form as a sim- ple oscillation between two states, a Low state (commonly associated with kick drum events) and a High state (often associated with either snare drum or high hat). Both an onset time and an accent time is associated to each state. The systematic inference of the reduced form is formal- ized. This enables the specification of a rhythmic struc- tural similarity measure on drum patterns, where reduced patterns are compared through alignment. The two-state representation allows a low computational cost alignment, once the complex topological formalization is fully taken into account. A comparison with the Hamming distance, as well as similarity ratings collected from listeners on a drum loop dataset, indicates that the bistate reduction enables to convey subtle aspects that goes beyond surface-level com- parison of rhythmic textures.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1815236" class="vrtx-external-publication">
        <div id="vrtx-publication-1815236">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1815236">
                Lartillot, Olivier; Cancino-Chacón, Carlos &amp; Brazier, Charles
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Real-Time Visualisation Of Fugue Played By A String Quartet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4624027">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">We present a new system for real-time visualisation of music performance, focused for the moment on a fugue played by a string quartet. The basic principle is to offer a visual guide to better understand music using strategies that should be as engaging, accessible and effective as possible. The pitch curves related to the separate voices are drawn on a space whose temporal axis is normalised with respect to metrical positions, and aligned vertically with respect to their thematic and motivic classification. Aspects related to tonality are represented as well. We describe the underlying technologies we have developed and the technical setting. In particular, the rhythmical and structural representation of the piece relies on real-time polyphonic audio-to-score alignment using online dynamic time warping. The visualisation will be presented at a concert of the Danish String Quartet, performing the last piece of The Art of Fugue by Johann Sebastian Bach.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254582" class="vrtx-external-publication">
        <div id="vrtx-publication-10254582">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254582">
                Lartillot, Olivier &amp; Sudo, Marina
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        AcousMuScope: Users&#39; Guide.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-REPORT">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            <a href="https://www.uio.no/ritmo/english/projects/mirage/software/AcousMuScope/index.html">https://www.uio.no/ritmo/english/projects/mirage/software/AcousMuScope/index.html</a>.
            <a href="https://hdl.handle.net/11250/4879710">Full text in research archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">AcousMuScope is a new software application for the music analysis of audio recordings, with a focus on a graphical interface for browsing the analyses.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2334555" class="vrtx-external-publication">
        <div id="vrtx-publication-2334555">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2334555">
                Joachimiak, Grzegorz; Ahrendt, Rebekah &amp; Lartillot, Olivier
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Endangered Musical Sources: Strategies for Safeguarding, Digitization, and International Collaboration. Report of Working Group 2 SOURCES, Wrocław, 22–24 May 2024.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-COMPENDIUM">
                        Zenodo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4665478">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2077642" class="vrtx-external-publication">
        <div id="vrtx-publication-2077642">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077642">
                Christodoulou, Anna-Maria; Anagnostopoulou, Christina &amp; Lartillot, Olivier
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Analysis of Greek folk music of the Aegean islands.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        National and Kapodistrian University of Athens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3266277">Fulltekst i vitenarkiv</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/filter?fundingIdentifier=287152&amp;fundingSource=NFR">Se alle arbeider i NVA</a></p>
    </div>

      </div>
    </div>



	  
            

            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Publisert</span> <span class="published-date">12. mai 2019 23:45 </span>
        
          - <span class="last-modified-date">Last modified</span> <span class="last-modified-date">18 Nov. 2024, 09:36</span>
        
        </div>
      
          </div>
          <div id="vrtx-additional-content">
            
      
      
      
        <div class="vrtx-project-contactinfo vrtx-frontpage-box">
          <h2>Contact</h2>
          <div class="vrtx-box-content">
            <p><a href="/ritmo/personer/fast/oliviel/index.html">Olivier Lartillot</a></p>

          </div>
        </div>
      
            
<div class="vrtx-person-list-participants vrtx-frontpage-box">
    <h2><a href="/ritmo/prosjekter/mirage/index.html?vrtx=list-related-persons">Participants</a></h2>

  <div class="vrtx-box-content">
      <ul>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="/ritmo/personer/fast/oliviel/index.html" class="vrtx-participant-name">Olivier Lartillot</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Anders Elovsson</span>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Lars Alfred L?berg Monstad</span>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="https://www.mn.uio.no/ifi/personer/vit/kyrrehg/index.html" class="vrtx-participant-name">Kyrre Glette</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="https://www.hf.uio.no/imv/personer/vit/emeriti/rolfig/index.html" class="vrtx-participant-name">Rolf Inge God?y</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="https://www.hf.uio.no/imv/personer/vit/fast/erlingeg/index.html" class="vrtx-participant-name">Erling E. Guldbrandsen</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="https://www.hf.uio.no/imv/personer/vit/fast/hanst/index.html" class="vrtx-participant-name">Hans T. Zeiner-Henriksen</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="https://www.hf.uio.no/imv/personer/vit/fast/oyvindyb/index.html" class="vrtx-participant-name">?yvin Dybsand</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="/ritmo/personer/senterledelse/anneda/index.html" class="vrtx-participant-name">Anne Danielsen</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="/ritmo/personer/senterledelse/alexanje/index.html" class="vrtx-participant-name">Alexander Refsum Jensenius</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Mari Romarheim Haugen</span>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="https://www.hf.uio.no/imv/personer/vit/fast/aksnes/index.html" class="vrtx-participant-name">Hallgjerd Aksnes</a>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Per Ole Hagen</span>

                    <span class="vrtx-participant-affiliation">
Universitetet i Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="https://www.nb.no/samlingen/musikk/" class="vrtx-participant-name">Hans-Hinrich Thedens</a>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
            <li>
              <div class="vrtx-participants vrtx-participant-no-image">
              <div class="vrtx-participant-info">
                  <a href="https://www.usn.no/om-usn/kontakt-oss/ansatte/mats-sigvard-johansson" class="vrtx-participant-name">Mats Sigvard Johansson</a>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
      </ul>
          <a class="all-messages" href="/ritmo/prosjekter/mirage/index.html?vrtx=list-related-persons">Detaljert oversikt over deltakere</a>
  </div>
</div>

            
            
      
            
      
      
        <div id="vrtx-related-content">
          <h2>Scientific Advisory Board</h2>

<ul>
	<li><a href="http://users.jyu.fi/~ptoiviai/">Petri Toiviainen</a></li>
	<li><a href="https://www.unige.ch/fapse/neuroemo/didier-grandjean">Didier Grandjean</a></li>
	<li><a href="https://vbn.aau.dk/en/persons/119171">David Meredith</a></li>
</ul>

<p>Follow us on Twitter: <a href="https://twitter.com/MirageUIO">@MirageUIO</a></p>

<h2>Duration</h2>

<p>December 2019 – November 2023</p>

<h2>Funding</h2>

<p>MIRAGE is funded by the Research Council of Norway (Norges forskningsråd) under the IKTPLUSS programme.</p>

        </div>
      
          </div>
        </div>
      
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo">RITMO Senter for tverrfaglig forskning p? rytme, tid og bevegelse</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Contact</h2>
   <p><a href="/ritmo/om/">Contact us</a><br>
   <a href="/om/finn-fram/omrader/gaustad/ga09/">Find us</a></p>
</div>
<div>
   <h2>About this website</h2>
   <p><a href="/om/regelverk/personvern/personvernerklering-nett.html">Use of cookies</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Accessibility statement</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Ansvarlig for denne siden</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredakt?r</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/prosjekter/mirage/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Log in
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/" title="G? til uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

</body>
</html>
