<!DOCTYPE html>
<html lang="en">
  <head>
    <!-- NOTE(review): removed injected SEO-spam markup that was prepended here:
         mobile-transcoding metas (Cache-Control no-transform/no-siteapp,
         MobileOptimized, HandheldFriendly), a global window.onerror handler that
         silently swallowed all script errors, and an og:image pointing at the
         off-site spam domain wap.y666.net (the legitimate og:image is declared
         further down in <head>). V_PATH is kept in case site scripts read it. -->
    <script>var V_PATH="/";</script>
    
    <meta charset="utf-8" >
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1" />

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <!-- NOTE(review): stripped the injected spam suffix "_澳门皇冠体育,皇冠足球比分"
             from the document title; og:title below was not tampered with. -->
        <title>Anna-Maria Christodoulou - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</title>
        <meta property="og:title" content="Anna-Maria Christodoulou - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion" />
      
    

    
  
  
  
  
  
  
  
  

  
    

    
    
    
      
      
        
        
          
          
            
                
            
            
            
            
              
            
          
          
        
      
    

    <meta name="twitter:card" content="summary" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="Anna-Maria Christodoulou" />

    
      <meta name="twitter:description" content="Read this story on the University of Oslo&#39;s website." />
    

    
      <meta name="twitter:image" content="/ritmo/english/people/phd-fellows/annammc/testpic1.jpg" />
    

    
    
      <meta name="twitter:url" content="/ritmo/english/people/phd-fellows/annammc/index.html" />
    
  

    
  
  
  
  
  
  
  
  

  
    
    

    <meta property="og:url" content="/ritmo/english/people/phd-fellows/annammc/index.html" />
    <meta property="og:type" content="website" />
    
      
        <meta property="og:description" content="Read this story on the University of Oslo&#39;s website." />
      
    

    

    
      
      
        
        
          
            
            
              
              <meta property="og:image" content="/ritmo/english/people/phd-fellows/annammc/testpic1.jpg" />
              <meta property="og:image:width" content="200" />
              <meta property="og:image:height" content="300" />

              
                

                
                
                
                  
                

                
                
                
                <meta property="og:updated_time" content="1766133846" />
              
            
          
        
      
    
  


    
  
  
  
  
  
  
  

  
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
  


    
  
  
  

  


    
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  

  

  
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  
  

  

  
    
  

  

   
     
       
     
     
       

         
         
       
     

     
   


    
        
      
    
  <!-- NOTE(review): removed injected spam here: gambling-site keywords/description
       metas, a <script src="/ceng.js"> loader (likely cloaking/redirect payload),
       and a second viewport meta with maximum-scale=1/user-scalable=no, which
       blocks pinch-zoom (WCAG 1.4.4 failure) and duplicated the correct viewport
       already declared earlier in <head>. A legitimate description meta, matching
       the page's og:description, is kept in its place. -->
  <meta name="description" content="Read this story on the University of Oslo&#39;s website." />
</head>

    
    
      
        
      
    

    
      <body class="www.uio.no not-for-ansatte header-context english faculty en" id="vrtx-person">
    
  <!--stopindex-->

     
  
  
  
  
  
  

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Jump to content">
    <ul id="hidnav">
     <li><a href="#right-main">Jump to main content</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <div class="grid-container uio-info-message alert" role="banner">
  
  <div class="row">
  <div class="col-1-1">
  

  
  
    
       &nbsp;
    
  
  
  

  </div>
  </div>
  </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/english/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo/english" class="uio-host">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Language menu">
              <a href="/ritmo/" class="header-lang-no-link" lang="no">No</a>
              <span>En</span>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Menu"><span>Menu</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/english/for-employees/">For employees</a></li>
            <li class="my-studies"><a href="https://minestudier.no/en/index.html">My studies</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">Search our webpages</label>
            
            <button type="submit">Search</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <ul class="vrtx-tab-menu">
    <li class="english parent-folder">
  <a href="/ritmo/english/">Home</a>
    </li>
    <li class="about">
  <a href="/ritmo/english/about/">About the Centre</a>
    </li>
    <li class="publications">
  <a href="/ritmo/english/publications/">Publications</a>
    </li>
    <li class="vrtx-active-item people vrtx-current-item" aria-current="page">
  <a href="/ritmo/english/people/">People</a>
    </li>
    <li class="news-and-events">
  <a href="/ritmo/english/news-and-events/">News and events</a>
    </li>
    <li class="research">
  <a href="/ritmo/english/research/">Research</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/english/">Go to uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Sub menu</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-ancestor"> <a href="/ritmo/english/people/"><span>People</span></a></li>
            <li class="vrtx-parent" ><a href="/ritmo/english/people/phd-fellows/"><span>PhD Fellows</span></a>

      <ul>
          <li class="vrtx-child"><a class="vrtx-marked" aria-current="page" href="/ritmo/english/people/phd-fellows/annammc/"><span>Anna-Maria Christodoulou</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="right-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Breadcrumbs">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4">
            <a href="/ritmo/english/people/">People</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-5 vrtx-breadcrumb-before-active">
            <a href="/ritmo/english/people/phd-fellows/">PhD Fellows</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-6 vrtx-breadcrumb-active">Anna-Maria Christodoulou
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
      <div id="vrtx-content">
        <div id="vrtx-main-content">
          <h1>
      
        Anna-Maria Christodoulou
      </h1>
          
      
      
      
        
  <div id="vrtx-person-position">
    <span>
        Doctoral Research Fellow
          -
        <a href="https://www.hf.uio.no/imv/english?vrtx=unit-view&amp;areacode=143695">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion (IMV)</a>
    </span>
  </div>


      
          <div id="vrtx-person-contact-info-wrapper">
              
      
        
        
        
          
          
            
            
            
            
              <img class="vrtx-person-image" src="/ritmo/english/people/phd-fellows/annammc/testpic1.jpg" alt="Anna-Maria Christodoulou" loading="lazy"/>
            
          
        
      
              
      <div class="vrtx-person-contactinfo">
        
        
        

          
	<span id="vrtx-person-change-language-link">
	  <a href="/ritmo/personer/stipendiater/annammc/index.html">Norwegian<span class="offscreen-screenreader"> version of this page</span></a>
	</span>


          
            <div class="vrtx-person-contact-info-line vrtx-email"><span class="vrtx-label">Email</span>
              
                <a class="vrtx-value" href="mailto:a.m.christodoulou@imv.uio.no">a.m.christodoulou@imv.uio.no</a>
              
            </div>
          
          
          
          
          
          
            <div class="vrtx-person-contact-info-line vrtx-username">
              <span class="vrtx-label">Username</span>
              
                  <div class="vrtx-login">
    <a href="/ritmo/english/people/phd-fellows/annammc/index.html?vrtx=login&amp;authTarget" rel="nofollow">Log in</a>
  </div>

              
            </div>
          
          
            
              <div class="vrtx-person-visiting-address"><span class="vrtx-label">Visiting address</span>
                
                  <span class="vrtx-address-line">Forskningsveien 3A</span>
                
                  <span class="vrtx-address-line">Harald Schjelderups hus</span>
                
                  <span class="vrtx-address-line">0373 Oslo</span>
                
              </div>
            
          
          
            <div class="vrtx-person-postal-address"><span class="vrtx-label"> Postal address</span>
              
                <span class="vrtx-address-line">Postboks 1133 Blindern</span>
              
                <span class="vrtx-address-line">0318 Oslo</span>
              
            </div>
          
          
            


  <div class="vrtx-person-other-units">
    <span class="vrtx-label">Other affiliations</span>
        <span class="vrtx-value">
          <a href="https://www.hf.uio.no/english">Faculty of Humanities</a>
          (Student)
        </span>
        <span class="vrtx-value">
          <a href="/link/english">Senter for læring og utdanning</a>
          (Student)
        </span>
  </div>


          
        
      </div>
              
      <div id="vrtx-person-contact-info-extras">
        
          <a id="vrtx-press-photo" href="/ritmo/english/people/phd-fellows/annammc/testpic1.jpg?alt=original&amp;vrtx=view-as-webpage">Press photo</a>
        
        
          <a id="vrtx-person-vcard" href="/ritmo/english/people/phd-fellows/annammc?vrtx=vcf">Download business card</a>
        
      </div>
              <div class="vrtx-person-contact-info-wrapper-end"></div>
          </div>
          <div id="vrtx-person-main-content-wrapper">
            <div class="vrtx-article-body">
              <h2>Academic interests</h2><p>My research revolves around using and developing intelligent systems that assist music analysis. My PhD is focused on the use and development of theories and tools for Multimodal MIR (Music Information Retrieval). At the same time, I am passionate about making music research accessible across different cultures, educational backgrounds, and abilities.</p><h2>Background</h2><p>2014-2016: Diploma in Music Theory - Apollonian Conservatory</p><p>2020-2021: Minor in IT (specialized in Artificial Intelligence) - American College of Greece</p><p><span style="color:var(--textColor);font-family:var(--mainFontStack);">2017-2022: MA in Sound Acoustics and Music Technology - National and Kapodistrian University of Athens</span></p>
            </div>
            
  <span class="vrtx-tags">
      <span class="title">Tags:</span>
    <span class="vrtx-tags-links">
<a href="/english/?vrtx=tags&amp;tag=Music%20Information%20Retrieval&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Music Information Retrieval</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=artificial%20intelligence&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">artificial intelligence</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Computational%20Music%20Analysis&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Computational Music Analysis</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Multimodality&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Multimodality</a>
    </span>
  </span>

            
      
      
      
      
      
      
        
        
      

      
      

      
        



<style>

    /* Publication-list typography for the tabs below.
       Titles of container works (journals, proceedings, books) and their
       publishers are italicised, per common citation style; publishers of
       book chapters render upright so only the parent title is italic. */
    .publisher-category-CHAPTER {
            font-style: normal;
    }

    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publications</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Scientific articles and book chapters</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Books</a></li>
            <li><a href="#vrtx-publication-tab-3" name="vrtx-publication-tab-3">Other</a></li>
        </ul>



    <div id="vrtx-publication-tab-1">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10299007" class="vrtx-external-publication">
        <div id="vrtx-publication-10299007">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10299007">
                Christodoulou, Anna-Maria; Arnim, Hugh Alexander von &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Supporting Narrative Comprehension in Programmatic Music through Music and Light.
                </span>
                    <span class="vrtx-parent-contributors">
                            In McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">p. 447–454.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17509282">10.5281/zenodo.17509282</a>.
            <a href="https://hdl.handle.net/11250/5330619">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Programmatic music, such as Tchaikovsky’s Overture Romeo and Juliet, relies on the audience’s ability to associate musical motifs with narrative elements. This is a demanding task for less experienced listeners, particularly when cues are subtle, such as those conveyed through timbre. This paper explores how dynamic stage lighting, driven by physiological signals, can enhance narrative comprehension in orchestral performance. Using the LightHearted interactive lighting system, different characters of the Overture were mapped to distinct colored lights, whose intensities were dynamically modulated in real time by the heart rates of the conductor and selected musicians. This integration aimed to convey subtle narrative cues to the audience in real time. Audience feedback suggests that this approach not only clarifies musical narratives but also enhances the overall experience.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254559" class="vrtx-external-publication">
        <div id="vrtx-publication-10254559">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254559">
                Christodoulou, Anna-Maria &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Multimodal Dataset of Greek Folk Music.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Luca, Elsa De (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    DLfM &#39;25: Proceedings of the 12th International Conference on Digital Libraries for Musicology.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720833.</span>
                            
                <span class="vrtx-pages">p. 19–27.</span>
            doi: <a href="https://doi.org/10.1145/3748336.3748339">10.1145/3748336.3748339</a>.
            <a href="https://hdl.handle.net/11250/4911355">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper presents a multimodal dataset of Greek folk dance music, focusing on syrtos and balos. Developed to support research in computational musicology, the dataset improves access to Greek musical heritage through manually transcribed MIDI scores, aligned lyrics, and rich metadata, all curated by expert musicologists. Through pattern analysis and feature extraction, we examine both shared melodic structures and unique characteristics of each dance, with some examples reflecting traces of oral transmission. While metadata accompanies the collection to support organization and context, our primary emphasis is on the musical and lyrical content. This work contributes to digital ethnomusicology by showing how multimodal datasets of folk music can inform both analytical research and cultural heritage preservation.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10253409" class="vrtx-external-publication">
        <div id="vrtx-publication-10253409">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10253409">
                Christodoulou, Anna-Maria; Glette, Kyrre; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusiQAl: A Dataset for Music Question–Answering through Audio–Video Fusion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Transactions of the International Society for Music Information Retrieval.
                </span>
                            8(1),
                <span class="vrtx-pages">p. 265–282.</span>
            doi: <a href="https://doi.org/10.5334/tismir.222">10.5334/tismir.222</a>.
            <a href="https://hdl.handle.net/11250/4732806">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Music question–answering (MQA) is a machine learning task where a computational system analyzes and answers questions about music-related data. Traditional methods prioritize audio, overlooking visual and embodied aspects crucial to music performance understanding. We introduce MusiQAl, a multimodal dataset of 310 music performance videos and 11,793 human-annotated question–answer pairs, spanning diverse musical traditions and styles. Grounded in musicology and music psychology, MusiQAl emphasizes multimodal reasoning, causal inference, and cross-cultural understanding of performer–music interaction. We benchmark AVST and LAVISH architectures on MusiQAl, revealing strengths and limitations, underscoring the importance of integrating multimodal learning and domain expertise to advance MQA and music information retrieval.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2292157" class="vrtx-external-publication">
        <div id="vrtx-publication-2292157">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2292157">
                Guo, Jinyue; Christodoulou, Anna-Maria; Laczko, Balint &amp; Glette, Kyrre
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        LVNS-RAVE: Diversified audio generation with RAVE and Latent Vector Novelty Search.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Li, Xiaodong &amp; Handl, Julia (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    GECCO &#39;24 Companion: Proceedings of the Genetic and Evolutionary Computation Conference Companion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400704956.</span>
                            
                <span class="vrtx-pages">p. 667–670.</span>
            doi: <a href="https://doi.org/10.1145/3638530.3654432">10.1145/3638530.3654432</a>.
            <a href="https://hdl.handle.net/11250/3455371">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Evolutionary Algorithms and Generative Deep Learning have been two of the most powerful tools for sound generation tasks. However, they have limitations: Evolutionary Algorithms require complicated designs, posing challenges in control and achieving realistic sound generation. Generative Deep Learning models often copy from the dataset and lack creativity. In this paper, we propose LVNS-RAVE, a method to combine Evolutionary Algorithms and Generative Deep Learning to produce realistic and novel sounds. We use the RAVE model as the sound generator and the VGGish model as a novelty evaluator in the Latent Vector Novelty Search (LVNS) algorithm. The reported experiments show that the method can successfully generate diversified, novel audio samples under different mutation setups using different pre-trained RAVE models. The characteristics of the generation process can be easily controlled with the mutation parameters. The proposed algorithm can be a creative tool for sound artists and musicians.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2356535" class="vrtx-external-publication">
        <div id="vrtx-publication-2356535">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2356535">
                Christodoulou, Anna-Maria; Dutta, Sagar; Lartillot, Olivier Serge Gabriel; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Convolutional Neural Network Models for Multimodal Classification of Expressive Piano Performance,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISSN 9789893520758.</span>
                            
            
            <a href="https://hdl.handle.net/10852/118901">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper addresses improving performance analysis by automating the recognition of expressive performance styles. We propose a multimodal fusion approach integrating audio, video, and motion data. We demonstrate the effectiveness of our approach by utilizing convolutional neural network (CNN) models. Training is done on a classical piano dataset of 211 excerpts containing audio, video, MIDI, and motion capture data. The results highlight the robustness of the CNN models; they achieve high accuracy even when trained on a limited dataset. Our study contributes to advancing the field of performance analysis by applying deep learning techniques to multimodal data.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2295084" class="vrtx-external-publication">
        <div id="vrtx-publication-2295084">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2295084">
                Christodoulou, Anna-Maria; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Multimodal music datasets? Challenges and future goals in music processing.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        International Journal of Multimedia Information Retrieval.
                </span>
                <span class="vrtx-issn">ISSN 2192-6611.</span>
                            13(3).
            doi: <a href="https://doi.org/10.1007/s13735-024-00344-6">10.1007/s13735-024-00344-6</a>.
            <a href="https://hdl.handle.net/10852/118423">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The term “multimodal music dataset” is often used to describe music-related datasets that represent music as a multimedia art form and multimodal experience. However, the term “multimodality” is often used differently in disciplines such as musicology, music psychology, and music technology. This paper proposes a definition of multimodality that works across different music disciplines. Many challenges are related to constructing, evaluating, and using multimodal music datasets. We provide a task-based categorization of multimodal datasets and suggest guidelines for their development. Diverse data pre-processing methods are illuminated, highlighting their contributions to transparent and reproducible music analysis. Additionally, evaluation metrics, methods, and benchmarks tailored for multimodal music processing tasks are scrutinized, empowering researchers to make informed decisions and facilitating cross-study comparisons.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200397" class="vrtx-external-publication">
        <div id="vrtx-publication-2200397">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200397">
                Riaz, Maham &amp; Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Using SuperCollider with OSC Commands for Spatial Audio Control in a Multi-Speaker Setup.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Andreopoulou, Areti &amp; Boren, Braxton (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Engineering Society 155th Convention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=14B11A7E-F376-483D-8077-B3EFFB3CBDD2">Audio Engineering Society, Inc.</a>.
                </span>
                <span class="vrtx-issn">ISBN 9781713894667.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3399808">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">With the ever-increasing prevalence of technology, its application in various music-related processes, such as music composition and performance, has become increasingly prominent. One fascinating area where technology finds utility is in music performance, offering opportunities for extensive sound exploration and manipulation. In this paper, we introduce an approach utilizing SuperCollider and Open Sound Control (OSC) commands in a multi-speaker setup, enabling spatial audio control for a truly interactive audio spatialization experience. We delve into the musicological dimensions of these distinct methods, examining their integration within a live performance setting to uncover their artistic and expressive potential. By merging technology and musicology, our research aims to unlock new avenues for immersive and captivating musical experiences.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1578841">View all works in NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10287052" class="vrtx-external-publication">
        <div id="vrtx-publication-10287052">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10287052">
                Arnim, Hugh Alexander von; Fleckenstein, Abbigail Marie &amp; Christodoulou, Anna-Maria
            </span>(2025).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        SysMus25 Conference Proceedings.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-ANTHOLOGYACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=440840CC-E3EB-4A75-9BC3-D1B3A363C297">Zenodo</a>.
                </span>
                            
                <span class="vrtx-pages">164 p.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17632991">10.5281/zenodo.17632991</a>.
            <a href="https://hdl.handle.net/11250/5320307">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10284067" class="vrtx-external-publication">
        <div id="vrtx-publication-10284067">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10284067">
                Fleckenstein, Abbigail Marie; Arnim, Hugh Alexander von &amp; Christodoulou, Anna-Maria
            </span>(2025).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        SysMus25 Book of Abstracts.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-ANTHOLOGYACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=440840CC-E3EB-4A75-9BC3-D1B3A363C297">Zenodo</a>.
                </span>
                            
                <span class="vrtx-pages">165 p.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17531650">10.5281/zenodo.17531650</a>.
            <a href="https://hdl.handle.net/11250/5317799">Full text in Research Archive</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1578841">View all works in NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-3">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10327939" class="vrtx-external-publication">
        <div id="vrtx-publication-10327939">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10327939">
                Fleckenstein, Abbigail Marie; Arnim, Hugh Alexander von &amp; Christodoulou, Anna-Maria
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Editorial Note.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Arnim, Hugh Alexander von; Fleckenstein, Abbigail Marie &amp; Christodoulou, Anna-Maria (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-other">
                    SysMus25 Conference Proceedings.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-INTRODUCTION">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=440840CC-E3EB-4A75-9BC3-D1B3A363C297">Zenodo</a>.
                </span>
                            
                <span class="vrtx-pages">p. 5–5.</span>
            
            <a href="https://hdl.handle.net/11250/5353906">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10298999" class="vrtx-external-publication">
        <div id="vrtx-publication-10298999">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10298999">
                Christodoulou, Anna-Maria; Arnim, Hugh Alexander von &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Supporting Narrative Comprehension in Programmatic Music through Music and Light.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5330610">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283712" class="vrtx-external-publication">
        <div id="vrtx-publication-10283712">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283712">
                Arnim, Hugh Alexander von; Christodoulou, Anna-Maria; Burnim, Kayla; Upham, Finn; Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        LightHearted—A Framework for Mapping ECG Signals to Light Parameters in Performing Arts.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5317546">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254572" class="vrtx-external-publication">
        <div id="vrtx-publication-10254572">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254572">
                Christodoulou, Anna-Maria; Glette, Kyrre; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusiQAl: Music Question Answering through Audio-Video fusion.
                </span>
                            
            <a href="https://ismir2025.ismir.net/program-detailed-schedule">https://ismir2025.ismir.net/program-detailed-schedule</a>.
            <a href="https://hdl.handle.net/11250/5061636">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254570" class="vrtx-external-publication">
        <div id="vrtx-publication-10254570">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254570">
                Christodoulou, Anna-Maria
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Benefits of Multimodal MIR in Computational Analysis of Music Therapy Improvisation.
                </span>
                            
            <a href="https://ifas.thws.de/fileadmin/user_upload/250917_HIGH-M_Symposium_Programme_updated.pdf">https://ifas.thws.de/fileadmin/user_upload/250917_HIGH-M_Symposium_Programme_updated.pdf</a>.
            <a href="https://hdl.handle.net/11250/3845187">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254562" class="vrtx-external-publication">
        <div id="vrtx-publication-10254562">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254562">
                Christodoulou, Anna-Maria &amp; Lartillot, Olivier
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Multimodal Dataset of Greek Folk Music.
                </span>
                            
            <a href="https://dlfm.web.ox.ac.uk/2025-programme">https://dlfm.web.ox.ac.uk/2025-programme</a>.
            <a href="https://hdl.handle.net/11250/3894993">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2296279" class="vrtx-external-publication">
        <div id="vrtx-publication-2296279">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2296279">
                Christodoulou, Anna-Maria; Dutta, Sagar; Lartillot, Olivier; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Convolutional Neural Network Models for Multimodal Classification of Expressive Piano Performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4642212">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2296281" class="vrtx-external-publication">
        <div id="vrtx-publication-2296281">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2296281">
                Christodoulou, Anna-Maria &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Navigating Challenges in Multimodal Music Data Management for AI Systems.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5024521">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The responsible management of multimodal music datasets plays a crucial role in the development and evaluation of music processing systems. However, navigating the landscape of legal and ethical considerations can be a complex and challenging task due to the magnitude and diversity of such. This paper clarifies these divergent legal and ethical considerations and highlights the challenges associated with multimodality and AI systems. Focusing on the most crucial stages of multimodal music data management, we provide recommendations for tackling legal and ethical challenges. We emphasize the importance of establishing an inclusive and accessible music data environment, encouraging researchers and data users to adopt responsible approaches towards managing multimodal music data collections.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2191209" class="vrtx-external-publication">
        <div id="vrtx-publication-2191209">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2191209">
                Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Workshop on Introduction to Multimodal Music Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4790824">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2191210" class="vrtx-external-publication">
        <div id="vrtx-publication-2191210">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2191210">
                Christodoulou, Anna-Maria; Lartillot, Olivier &amp; Anagnostopoulou, Christina
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Analysis of Greek Folk Music of the Aegean.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4644595">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200586" class="vrtx-external-publication">
        <div id="vrtx-publication-2200586">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200586">
                Wosch, Thomas; Vobig, Bastian; Lartillot, Olivier &amp; Christodoulou, Anna-Maria
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        HIGH-M (Human Interaction assessment and Generative segmentation in Health and Music).
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5108559">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2077535" class="vrtx-external-publication">
        <div id="vrtx-publication-2077535">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077535">
                Lartillot, Olivier; Godøy, Rolf Inge &amp; Christodoulou, Anna-Maria
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational detection and characterisation of sonic shapes: Towards a Toolbox des objets sonores.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4626053">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Computational detection and analysis of sound objects is of high importance both for musicology and sound design. Yet Music Information Retrieval technologies have so far been mostly focusing on transcription of music into notes in a classical sense whereas we are interested in detecting sound objects and their feature categories, as was suggested by Pierre Schaeffer’s typology and morphology of sound objects in 1966, reflecting basic sound-producing action types. We propose a signal-processing based approach for segmentation, based on a tracking of the salient characteristics over time, and dually Gestalt-based segmentation decisions based on changes. Tracking of pitched sound relies on partial tracking, whereas the analysis of noisy sound requires tracking of larger frequency bands possibly varying over time. The resulting sound objects are then described based on Schaeffer’s taxonomy and morphology, expressed first in the form of numerical descriptors, each related to one type of taxonomy (percussive/sustained/iterative, stable/moving pitch vs unclear pitch) or morphology (such as grain). This multidimensional feature representation is further divided into discrete categories related to the different classes of sounds. The typological and morphological categorisation is driven by the theoretical and experimental framework of the morphodynamical theory. We first experiment on isolated sounds from the Solfège des objets sonores—which features a large variety of sound sources—before considering more complex configurations featuring a succession of sound objects without silence or with simultaneous sound objects. Analytical results are visualised in the form of graphical representations, aimed both for musicology and music pedagogy purposes. This will be applied to the graphical descriptions of and browsing within large music catalogues. 
The application of the analytical descriptions to music creation is also investigated.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2077642" class="vrtx-external-publication">
        <div id="vrtx-publication-2077642">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2077642">
                Christodoulou, Anna-Maria; Anagnostopoulou, Christina &amp; Lartillot, Olivier
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational Analysis of Greek folk music of the Aegean islands.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        National and Kapodistrian University of Athens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3266277">Full text in Research Archive</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1578841">View all works in NVA</a></p>
    </div>

      </div>
    </div>



      
            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Published</span>
        <span class="published-date">Jan. 31, 2023 4:34 PM </span>
        
        - <span class="last-modified-date">Last modified</span>
        <span class="last-modified-date">Dec. 19, 2025 9:44 AM</span>
        
        </div>
      
          </div>
        </div>
        <div id="vrtx-additional-content">
          
      
          

<div class="vrtx-projects vrtx-frontpage-box">
  <h2>Projects</h2>

  <div class="vrtx-box-content">
  <ul class="only-links">
      <li><a href="/ritmo/english/projects/Bodies-in-Concert/index.html">Bodies in Concert</a></li>
      <li><a href="/ritmo/english/projects/musical-hci/index.html">Musical human-computer interaction</a></li>
  </ul>

  </div>
</div>



          
          
      
      
        </div>
      </div>
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo/english">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Contact information</h2>
   <p><a href="/ritmo/english/about/">Contact us</a><br>
   <a href="/english/about/getting-around/areas/gaustad/ga09/">Find us</a></p>
</div>
<div>
   <h2>About the website</h2>
   <p><a href="/english/about/regulations/privacy-declarations/privacy-policy-web.html">Cookies</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Accessibility statement (in Norwegian only)</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Responsible for this page</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredaktør</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/english/people/phd-fellows/annammc/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Log in
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/english/" title="Go to uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

<!-- NOTE(review): this Baidu link-submit snippet, together with the gambling-spam
     keywords appended to <title>, is a common marker of an injected SEO compromise
     on hacked sites — confirm it is intentional; if not, remove the whole marked
     block and audit the server. -->
<!--a4d1bc0e1742c08b--><script>
(function () {
  // Submit the current page URL to Baidu's link-push service by injecting
  // its push.js before the first <script> on the page.
  // Always load over HTTPS: the endpoint is served over TLS, and an http://
  // script on an https:// page would be blocked as mixed content anyway.
  var pushScript = document.createElement('script');
  pushScript.src = 'https://zz.bdstatic.com/linksubmit/push.js';
  var firstScript = document.getElementsByTagName('script')[0];
  firstScript.parentNode.insertBefore(pushScript, firstScript);
})();
</script><!--/a4d1bc0e1742c08b--></body>
</html>
