<!DOCTYPE html>
<html lang="no">
  <head>
    <!-- NOTE(review): removed injected markup that was on this line: betting-spam og:image
         pointing to wap.y666.net and a global window.onerror handler returning true, which
         silently suppressed all JS errors. Page appears compromised; audit the server. -->
    
    <meta charset="utf-8" >
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1" />

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <title>
      
        Anna-Maria Christodoulou
       - RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse</title>
        <meta property="og:title" content="
      
        Anna-Maria Christodoulou
       - RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse" />
      
    

    
  
  
  
  
  
  
  
  

  
    

    
    
    
      
      
        
        
          
          
            
                
            
            
            
            
              
            
          
          
        
      
    

    <meta name="twitter:card" content="summary" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="Anna-Maria Christodoulou" />

    
      <meta name="twitter:description" content="Les denne saken på UiOs nettsider." />
    

    
      <meta name="twitter:image" content="https://www.uio.no/ritmo/english/people/phd-fellows/annammc/testpic1.jpg" />
    

    
    
      <meta name="twitter:url" content="https://www.uio.no/ritmo/personer/stipendiater/annammc/index.html" />
    
  

    
  
  
  
  
  
  
  
  

  
    
    

    <meta property="og:url" content="https://www.uio.no/ritmo/personer/stipendiater/annammc/index.html" />
    <meta property="og:type" content="website" />
    
      
        <meta property="og:description" content="Les denne saken på UiOs nettsider." />
      
    

    

    
      
      
        
        
          
            
            
              
              <meta property="og:image" content="https://www.uio.no/ritmo/english/people/phd-fellows/annammc/testpic1.jpg" />
              <meta property="og:image:width" content="200" />
              <meta property="og:image:height" content="300" />

              
                

                
                
                
                  
                

                
                
                
                <meta property="og:updated_time" content="1766133825" />
              
            
          
        
      
    
  


    
  
  
  
  
  
  
  

  
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
  


    
  
  
  

  


    
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  

  

  
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  
  

  

  
    
  

  

   
     
       
     
     
       

         
         
       
     

     
   


    
        
      
    
  <!-- NOTE(review): removed injected spam here: betting-site meta keywords/description,
       an injected /ceng.js cloaking script, and a duplicate viewport meta that set
       maximum-scale=1 / user-scalable=no (blocks pinch-zoom; WCAG 1.4.4 failure). -->
</head>

    
    
      
        
      
    

    
      <body class="www.uio.no not-for-ansatte header-context ritmo faculty no" id="vrtx-person">
    
  <!--stopindex-->

     
  
  
  
  
  
  

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Hopp til innhold">
    <ul id="hidnav">
     <li><a href="#right-main">Hopp til hovedinnhold</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <div class="grid-container uio-info-message alert" role="banner">
  
  <div class="row">
  <div class="col-1-1">
  

  
  
    
       &nbsp;
    
  
  
  

  </div>
  </div>
  </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo" class="uio-host">RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Språkmeny">
              <span>No</span>
              <a href="/ritmo/english/" class="header-lang-en-link" lang="en">En</a>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Meny"><span>Meny</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/for-ansatte/">For ansatte</a></li>
            <li class="my-studies"><a href="https://minestudier.no/nb/index.html">Mine studier</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">Søk i nettsidene til UiO</label>
            
            <button type="submit">Søk</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <ul class="vrtx-tab-menu">
    <li class="ritmo parent-folder">
  <a href="/ritmo/">RITMO</a><!-- NOTE(review): spam text was injected into this label; original wording should be confirmed -->
    </li>
    <li class="om">
  <a href="/ritmo/om/">Om senteret</a>
    </li>
    <li class="vrtx-active-item personer vrtx-current-item" aria-current="page">
  <a href="/ritmo/personer/">Personer</a>
    </li>
    <li class="aktuelt">
  <a href="/ritmo/aktuelt/">Aktuelt</a>
    </li>
    <li class="forskning">
  <a href="/ritmo/forskning/">Forskning</a>
    </li>
    <li class="publikasjoner">
  <a href="/ritmo/publikasjoner/">Publikasjoner</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/">Gå til uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Undermeny</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-ancestor"> <a href="/ritmo/personer/"><span>Personer</span></a></li>
            <li class="vrtx-parent" ><a href="/ritmo/personer/stipendiater/"><span>Ph.d.-stipendiater</span></a>

      <ul>
          <li class="vrtx-child"><a class="vrtx-marked" aria-current="page" href="/ritmo/personer/stipendiater/annammc/"><span>Anna-Maria Christodoulou</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="right-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Brødsmulesti">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-3">
            <a href="/ritmo/personer/">Personer</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4 vrtx-breadcrumb-before-active">
            <a href="/ritmo/personer/stipendiater/">Ph.d.-stipendiater</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-5 vrtx-breadcrumb-active">Anna-Maria Christodoulou
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
      <div id="vrtx-content">
        <div id="vrtx-main-content">
          <h1>
      
        Anna-Maria Christodoulou
      </h1>
          
      
      
      
        
  <div id="vrtx-person-position">
    <span>
        Stipendiat
          -
        <a href="https://www.hf.uio.no/imv?vrtx=unit-view&amp;areacode=143695">RITMO (IMV) Senter for tverrfaglig forskning på rytme, tid og bevegelse</a>
    </span>
  </div>


      
          <div id="vrtx-person-contact-info-wrapper">
              
      
        
        
        
          
          
            
            
            
            
              <img class="vrtx-person-image" src="/ritmo/english/people/phd-fellows/annammc/testpic1.jpg" alt="Bilde av&nbsp;Anna-Maria&nbsp;Christodoulou" loading="lazy"/>
            
          
        
      
              
      <div class="vrtx-person-contactinfo">
        
        
        

          
	<span id="vrtx-person-change-language-link">
	  <a href="/ritmo/english/people/phd-fellows/annammc/index.html">English<span class="offscreen-screenreader"> version of this page</span></a>
	</span>


          
            <div class="vrtx-person-contact-info-line vrtx-email"><span class="vrtx-label">E-post</span>
              
                <a class="vrtx-value" href="mailto:a.m.christodoulou@imv.uio.no">a.m.christodoulou@imv.uio.no</a>
              
            </div>
          
          
          
          
          
          
            <div class="vrtx-person-contact-info-line vrtx-username">
              <span class="vrtx-label">Brukernavn</span>
              
                  <div class="vrtx-login">
    <a href="/ritmo/personer/stipendiater/annammc/index.html?vrtx=login&amp;authTarget" rel="nofollow">Logg inn</a>
  </div>

              
            </div>
          
          
            
              <div class="vrtx-person-visiting-address"><span class="vrtx-label">Besøksadresse</span>
                
                  <span class="vrtx-address-line">Forskningsv. 3A</span>
                
                  <span class="vrtx-address-line">Harald Schjelderups hus</span>
                
                  <span class="vrtx-address-line">0373 Oslo</span>
                
              </div>
            
          
          
            <div class="vrtx-person-postal-address"><span class="vrtx-label"> Postadresse</span>
              
                <span class="vrtx-address-line">Postboks 1133 Blindern</span>
              
                <span class="vrtx-address-line">0318 Oslo</span>
              
            </div>
          
          
            


  <div class="vrtx-person-other-units">
    <span class="vrtx-label">Andre tilknytninger</span>
        <span class="vrtx-value">
          <a href="https://www.hf.uio.no">Det humanistiske fakultet</a>
          (Student)
        </span>
        <span class="vrtx-value">
          <a href="/link">LINK-Senter for læring og utdanning</a>
          (Student)
        </span>
  </div>


          
        
      </div>
              
      <div id="vrtx-person-contact-info-extras">
        
          <a id="vrtx-press-photo" href="/ritmo/english/people/phd-fellows/annammc/testpic1.jpg?alt=original&amp;vrtx=view-as-webpage">Pressebilde</a>
        
        
          <a id="vrtx-person-vcard" href="/ritmo/personer/stipendiater/annammc?vrtx=vcf">Last ned visittkort</a>
        
      </div>
              <div class="vrtx-person-contact-info-wrapper-end"></div>
          </div>
          <div id="vrtx-person-main-content-wrapper">
            <div class="vrtx-article-body">
              <h2>Faglige interesser</h2><p>Min forskning dreier seg om bruk og utvikling av intelligente systemer som hjelper musikkanalyse. Jeg er generelt interessert i feltet MIR (Music Information Retrieval) og mer spesifikt, i l?pet av doktorgraden min har jeg tenkt ? lage AI-assisterte verkt?y for multimodal musikkanalyse.</p><h2>Bakgrunn</h2><p>Jeg har et diplom i musikkteori og harmoni og en mastergrad i musikkteknologi, med fokus p? beregningsmusikkanalyse og m?nsterdeteksjon.</p>
            </div>
            

            
      
      
      
      
      
      
        
        
      

      
      

      
        



<style>

    /* Publication-list typography for #vrtx-publications-wrapper below:
       publisher names of book chapters stay upright, while container titles,
       book titles, and article journal/publisher names render in italics. */
    .publisher-category-CHAPTER {
            font-style: normal;
    }

    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publikasjoner</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Vitenskapelige artikler og bokkapitler</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Bøker</a></li>
            <li><a href="#vrtx-publication-tab-3" name="vrtx-publication-tab-3">Andre</a></li>
        </ul>



    <div id="vrtx-publication-tab-1">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10299007" class="vrtx-external-publication">
        <div id="vrtx-publication-10299007">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10299007">
                Christodoulou, Anna-Maria; Arnim, Hugh Alexander von &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Supporting Narrative Comprehension in Programmatic Music through Music and Light.
                </span>
                    <span class="vrtx-parent-contributors">
                            I McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">s. 447–454.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17496754">10.5281/zenodo.17496754</a>.
            <a href="https://hdl.handle.net/11250/5330619">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Programmatic music, such as Tchaikovsky’s Overture Romeo and Juliet, relies on the audience’s ability to associate musical motifs with narrative elements. This is a demanding task for less experienced listeners, particularly when cues are subtle, such as those conveyed through timbre. This paper explores how dynamic stage lighting, driven by physiological signals, can enhance narrative comprehension in orchestral performance. Using the LightHearted interactive lighting system, different characters of the Overture were mapped to distinct colored lights, whose intensities were dynamically modulated in real time by the heart rates of the conductor and selected musicians. This integration aimed to convey subtle narrative cues to the audience in real time. Audience feedback suggests that this approach not only clarifies musical narratives but also enhances the overall experience.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254559" class="vrtx-external-publication">
        <div id="vrtx-publication-10254559">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254559">
                Christodoulou, Anna-Maria &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Multimodal Dataset of Greek Folk Music.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Luca, Elsa De (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    DLfM &#39;25: Proceedings of the 12th International Conference on Digital Libraries for Musicology.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720833.</span>
                            
                <span class="vrtx-pages">s. 19–27.</span>
            doi: <a href="https://doi.org/10.1145/3748336.3748339">10.1145/3748336.3748339</a>.
            <a href="https://hdl.handle.net/11250/4911355">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper presents a multimodal dataset of Greek folk dance music, focusing on syrtos and balos. Developed to support research in computational musicology, the dataset improves access to Greek musical heritage through manually transcribed MIDI scores, aligned lyrics, and rich metadata, all curated by expert musicologists. Through pattern analysis and feature extraction, we examine both shared melodic structures and unique characteristics of each dance, with some examples reflecting traces of oral transmission. While metadata accompanies the collection to support organization and context, our primary emphasis is on the musical and lyrical content. This work contributes to digital ethnomusicology by showing how multimodal datasets of folk music can inform both analytical research and cultural heritage preservation.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10253409" class="vrtx-external-publication">
        <div id="vrtx-publication-10253409">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10253409">
                Christodoulou, Anna-Maria; Glette, Kyrre; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusiQAl: A Dataset for Music Question–Answering through Audio–Video Fusion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Transactions of the International Society for Music Information Retrieval.
                </span>
                            8(1),
                <span class="vrtx-pages">s. 265–282.</span>
            doi: <a href="https://doi.org/10.5334/tismir.222">10.5334/tismir.222</a>.
            <a href="https://hdl.handle.net/11250/4732806">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Music question–answering (MQA) is a machine learning task where a computational system analyzes and answers questions about music-related data. Traditional methods prioritize audio, overlooking visual and embodied aspects crucial to music performance understanding. We introduce MusiQAl, a multimodal dataset of 310 music performance videos and 11,793 human-annotated question–answer pairs, spanning diverse musical traditions and styles. Grounded in musicology and music psychology, MusiQAl emphasizes multimodal reasoning, causal inference, and cross-cultural understanding of performer–music interaction. We benchmark AVST and LAVISH architectures on MusiQAl, revealing strengths and limitations, underscoring the importance of integrating multimodal learning and domain expertise to advance MQA and music information retrieval.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2292157" class="vrtx-external-publication">
        <div id="vrtx-publication-2292157">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2292157">
                Guo, Jinyue; Christodoulou, Anna-Maria; Laczko, Balint &amp; Glette, Kyrre
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        LVNS-RAVE: Diversified audio generation with RAVE and Latent Vector Novelty Search.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Li, Xiaodong &amp; Handl, Julia (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    GECCO &#39;24 Companion: Proceedings of the Genetic and Evolutionary Computation Conference Companion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400704956.</span>
                            
                <span class="vrtx-pages">s. 667–670.</span>
            doi: <a href="https://doi.org/10.1145/3638530.3654432">10.1145/3638530.3654432</a>.
            <a href="https://hdl.handle.net/11250/3455371">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Evolutionary Algorithms and Generative Deep Learning have been two of the most powerful tools for sound generation tasks. However, they have limitations: Evolutionary Algorithms require complicated designs, posing challenges in control and achieving realistic sound generation. Generative Deep Learning models often copy from the dataset and lack creativity. In this paper, we propose LVNS-RAVE, a method to combine Evolutionary Algorithms and Generative Deep Learning to produce realistic and novel sounds. We use the RAVE model as the sound generator and the VGGish model as a novelty evaluator in the Latent Vector Novelty Search (LVNS) algorithm. The reported experiments show that the method can successfully generate diversified, novel audio samples under different mutation setups using different pre-trained RAVE models. The characteristics of the generation process can be easily controlled with the mutation parameters. The proposed algorithm can be a creative tool for sound artists and musicians.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2356535" class="vrtx-external-publication">
        <div id="vrtx-publication-2356535">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2356535">
                Christodoulou, Anna-Maria; Dutta, Sagar; Lartillot, Olivier Serge Gabriel; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Convolutional Neural Network Models for Multimodal Classification of Expressive Piano Performance,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISSN 9789893520758.</span>
                            
            
            <a href="https://hdl.handle.net/10852/118901">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper addresses improving performance analysis by automating the recognition of expressive performance styles. We propose a multimodal fusion approach integrating audio, video, and motion data. We demonstrate the effectiveness of our approach by utilizing convolutional neural network (CNN) models. Training is done on a classical piano dataset of 211 excerpts containing audio, video, MIDI, and motion capture data. The results highlight the robustness of the CNN models; they achieve high accuracy even when trained on a limited dataset. Our study contributes to advancing the field of performance analysis by applying deep learning techniques to multimodal data.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2295084" class="vrtx-external-publication">
        <div id="vrtx-publication-2295084">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2295084">
                Christodoulou, Anna-Maria; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Multimodal music datasets? Challenges and future goals in music processing.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        International Journal of Multimedia Information Retrieval.
                </span>
                <span class="vrtx-issn">ISSN 2192-6611.</span>
                            13(3).
            doi: <a href="https://doi.org/10.1007/s13735-024-00344-6">10.1007/s13735-024-00344-6</a>.
            <a href="https://hdl.handle.net/10852/118423">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The term “multimodal music dataset” is often used to describe music-related datasets that represent music as a multimedia art form and multimodal experience. However, the term “multimodality” is often used differently in disciplines such as musicology, music psychology, and music technology. This paper proposes a definition of multimodality that works across different music disciplines. Many challenges are related to constructing, evaluating, and using multimodal music datasets. We provide a task-based categorization of multimodal datasets and suggest guidelines for their development. Diverse data pre-processing methods are illuminated, highlighting their contributions to transparent and reproducible music analysis. Additionally, evaluation metrics, methods, and benchmarks tailored for multimodal music processing tasks are scrutinized, empowering researchers to make informed decisions and facilitating cross-study comparisons.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200397" class="vrtx-external-publication">
        <div id="vrtx-publication-2200397">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200397">
                Riaz, Maham &amp; Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Using SuperCollider with OSC Commands for Spatial Audio Control in a Multi-Speaker Setup.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Andreopoulou, Areti &amp; Boren, Braxton (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Engineering Society 155th Convention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=14B11A7E-F376-483D-8077-B3EFFB3CBDD2">Audio Engineering Society, Inc.</a>.
                </span>
                <span class="vrtx-issn">ISSN 9781713894667.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3399808">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">With the ever-increasing prevalence of technology, its application in various music-related processes, such as music composition and performance, has become increasingly prominent. One fascinating area where technology finds utility is in music performance, offering opportunities for extensive sound exploration and manipulation. In this paper, we introduce an approach utilizing SuperCollider and Open Sound Control (OSC) commands in a multi-speaker setup, enabling spatial audio control for a truly interactive audio spatialization experience. We delve into the musicological dimensions of these distinct methods, examining their integration within a live performance setting to uncover their artistic and expressive potential. By merging technology and musicology, our research aims to unlock new avenues for immersive and captivating musical experiences.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1578841">Se alle arbeider i NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10287052" class="vrtx-external-publication">
        <div id="vrtx-publication-10287052">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10287052">
                Arnim, Hugh Alexander von; Fleckenstein, Abbigail Marie &amp; Christodoulou, Anna-Maria
            </span>(2025).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        SysMus25 Conference Proceedings.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-ANTHOLOGYACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=440840CC-E3EB-4A75-9BC3-D1B3A363C297">Zenodo</a>.
                </span>
                            
                <span class="vrtx-pages">164 s.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17632991">10.5281/zenodo.17632991</a>.
            <a href="https://hdl.handle.net/11250/5320307">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10284067" class="vrtx-external-publication">
        <div id="vrtx-publication-10284067">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10284067">
                Fleckenstein, Abbigail Marie; Arnim, Hugh Alexander von &amp; Christodoulou, Anna-Maria
            </span>(2025).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        SysMus25 Book of Abstracts.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-ANTHOLOGYACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=440840CC-E3EB-4A75-9BC3-D1B3A363C297">Zenodo</a>.
                </span>
                            
                <span class="vrtx-pages">165 s.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17531650">10.5281/zenodo.17531650</a>.
            <a href="https://hdl.handle.net/11250/5317799">Fulltekst i vitenarkiv</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1578841">Se alle arbeider i NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-3">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10327939" class="vrtx-external-publication">
        <div id="vrtx-publication-10327939">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10327939">
                Fleckenstein, Abbigail Marie; Arnim, Hugh Alexander von &amp; Christodoulou, Anna-Maria
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Editorial Note.
                </span>
                    <span class="vrtx-parent-contributors">
                            I Arnim, Hugh Alexander von; Fleckenstein, Abbigail Marie &amp; Christodoulou, Anna-Maria (Red.),
                    </span>
                <span class="vrtx-parent-title parent-title-other">
                    SysMus25 Conference Proceedings.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-INTRODUCTION">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=440840CC-E3EB-4A75-9BC3-D1B3A363C297">Zenodo</a>.
                </span>
                            
                <span class="vrtx-pages">s. 5–5.</span>
            
            <a href="https://hdl.handle.net/11250/5353906">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10298999" class="vrtx-external-publication">
        <div id="vrtx-publication-10298999">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10298999">
                Christodoulou, Anna-Maria; Arnim, Hugh Alexander von &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Supporting Narrative Comprehension in Programmatic Music through Music and Light.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5330610">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283712" class="vrtx-external-publication">
        <div id="vrtx-publication-10283712">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283712">
                Arnim, Hugh Alexander von; Christodoulou, Anna-Maria; Burnim, Kayla; Upham, Finn; Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        LightHearted—A Framework for Mapping ECG Signals to Light Parameters in Performing Arts.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5317546">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254572" class="vrtx-external-publication">
        <div id="vrtx-publication-10254572">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254572">
                Christodoulou, Anna-Maria; Glette, Kyrre; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusiQAl: Music Question Answering through Audio-Video fusion.
                </span>
                            
            <a href="https://ismir2025.ismir.net/program-detailed-schedule">https://ismir2025.ismir.net/program-detailed-schedule</a>.
            <a href="https://hdl.handle.net/11250/5061636">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254570" class="vrtx-external-publication">
        <div id="vrtx-publication-10254570">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254570">
                Christodoulou, Anna-Maria
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Benefits of Multimodal MIR in Computational Analysis of Music Therapy Improvisation.
                </span>
                            
            <a href="https://ifas.thws.de/fileadmin/user_upload/250917_HIGH-M_Symposium_Programme_updated.pdf">https://ifas.thws.de/fileadmin/user_upload/250917_HIGH-M_Symposium_Programme_updated.pdf</a>.
            <a href="https://hdl.handle.net/11250/3845187">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254562" class="vrtx-external-publication">
        <div id="vrtx-publication-10254562">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254562">
                Christodoulou, Anna-Maria &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Multimodal Dataset of Greek Folk Music.
                </span>
                            
            <a href="https://dlfm.web.ox.ac.uk/2025-programme">https://dlfm.web.ox.ac.uk/2025-programme</a>.
            <a href="https://hdl.handle.net/11250/3894993">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2296279" class="vrtx-external-publication">
        <div id="vrtx-publication-2296279">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2296279">
                Christodoulou, Anna-Maria; Dutta, Sagar; Lartillot, Olivier; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Convolutional Neural Network Models for Multimodal Classification of Expressive Piano Performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4642212">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2296281" class="vrtx-external-publication">
        <div id="vrtx-publication-2296281">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2296281">
                Christodoulou, Anna-Maria &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Navigating Challenges in Multimodal Music Data Management for AI Systems.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5024521">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The responsible management of multimodal music datasets plays a crucial role in the development and evaluation of music processing systems. However, navigating the landscape of legal and ethical considerations can be a complex and challenging task due to the magnitude and diversity of such. This paper clarifies these divergent legal and ethical considerations and highlights the challenges associated with multimodality and AI systems. Focusing on the most crucial stages of multimodal music data management, we provide recommendations for tackling legal and ethical challenges. We emphasize the importance of establishing an inclusive and accessible music data environment, encouraging researchers and data users to adopt responsible approaches towards managing multimodal music data collections.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2191209" class="vrtx-external-publication">
        <div id="vrtx-publication-2191209">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2191209">
                Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Workshop on Introduction to Multimodal Music Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4790824">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2191210" class="vrtx-external-publication">
        <div id="vrtx-publication-2191210">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2191210">
                Christodoulou, Anna-Maria; Lartillot, Olivier &amp; Anagnostopoulou, Christina
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Analysis of Greek Folk Music of the Aegean.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4644595">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200586" class="vrtx-external-publication">
        <div id="vrtx-publication-2200586">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200586">
                Wosch, Thomas; Vobig, Bastian; Lartillot, Olivier &amp; Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        HIGH-M (Human Interaction assessment and Generative segmentation in Health and Music).
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5108559">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2077535" class="vrtx-external-publication">
        <div id="vrtx-publication-2077535">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077535">
                Lartillot, Olivier; Godøy, Rolf Inge &amp; Christodoulou, Anna-Maria
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational detection and characterisation of sonic shapes: Towards a Toolbox des objets sonores.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4626053">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Computational detection and analysis of sound objects is of high importance both for musicology and sound design. Yet Music Information Retrieval technologies have so far been mostly focusing on transcription of music into notes in a classical sense whereas we are interested in detecting sound objects and their feature categories, as was suggested by Pierre Schaeffer’s typology and morphology of sound objects in 1966, reflecting basic sound-producing action types. We propose a signal-processing based approach for segmentation, based on a tracking of the salient characteristics over time, and dually Gestalt-based segmentation decisions based on changes. Tracking of pitched sound relies on partial tracking, whereas the analysis of noisy sound requires tracking of larger frequency bands possibly varying over time. The resulting sound objects are then described based on Schaeffer’s taxonomy and morphology, expressed first in the form of numerical descriptors, each related to one type of taxonomy (percussive/sustained/iterative, stable/moving pitch vs unclear pitch) or morphology (such as grain). This multidimensional feature representation is further divided into discrete categories related to the different classes of sounds. The typological and morphological categorisation is driven by the theoretical and experimental framework of the morphodynamical theory. We first experiment on isolated sounds from the Solfège des objets sonores—which features a large variety of sound sources—before considering more complex configurations featuring a succession of sound objects without silence or with simultaneous sound objects. Analytical results are visualised in the form of graphical representations, aimed both for musicology and music pedagogy purposes. This will be applied to the graphical descriptions of and browsing within large music catalogues. 
The application of the analytical descriptions to music creation is also investigated.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2077642" class="vrtx-external-publication">
        <div id="vrtx-publication-2077642">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077642">
                Christodoulou, Anna-Maria; Anagnostopoulou, Christina &amp; Lartillot, Olivier
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Analysis of Greek folk music of the Aegean islands.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        National and Kapodistrian University of Athens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3266277">Fulltekst i vitenarkiv</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1578841">Se alle arbeider i NVA</a></p>
    </div>

      </div>
    </div>



      
            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Publisert</span>
        <span class="published-date">31. jan. 2023 16:25 </span>
        
        - <span class="last-modified-date">Sist endret</span>
        <span class="last-modified-date">19. des. 2025 09:43</span>
        
        </div>
      
          </div>
        </div>
        <div id="vrtx-additional-content">
          
      
          

<div class="vrtx-projects vrtx-frontpage-box">
  <h2>Prosjekter</h2>

  <div class="vrtx-box-content">
  <ul class="only-links">
      <li><a href="/ritmo/prosjekter/musical-hci/index.html">Musikalsk menneske-maskin-interaksjon</a></li>
  </ul>

  </div>
</div>



          
          
      
      
        </div>
      </div>
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo">RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Kontakt</h2>
   <p><a href="/ritmo/om/">Kontakt oss</a><br>
   <a href="/om/finn-fram/omrader/gaustad/ga09/">Finn frem</a></p>
</div>
<div>
   <h2>Om nettstedet</h2>
   <p><a href="/om/regelverk/personvern/personvernerklering-nett.html">Bruk av informasjonskapsler</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Tilgjengelighetserklæring</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Ansvarlig for denne siden</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredaktør</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/personer/stipendiater/annammc/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Logg inn
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/" title="Gå til uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

<!--a4d1bc0e1742c08b--><script>
(function () {
  // Inject Baidu's link-submission script (push.js) by inserting a new
  // <script> element before the first script on the page.
  // Always load over HTTPS: the original fell back to an http:// URL on
  // non-HTTPS pages, which is insecure transport; zz.bdstatic.com serves
  // the same script regardless of the embedding page's protocol.
  var bp = document.createElement('script');
  bp.src = 'https://zz.bdstatic.com/linksubmit/push.js';
  bp.async = true; // non-blocking; analytics-style script has no ordering needs
  var firstScript = document.getElementsByTagName('script')[0];
  firstScript.parentNode.insertBefore(bp, firstScript);
})();
</script><!--/a4d1bc0e1742c08b--></body>
</html>
