<!DOCTYPE html>
<html lang="en">
  <head>
    
    <meta charset="utf-8" >
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1" />

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <title>Balint Laczko - RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse</title>
        <meta property="og:title" content="Balint Laczko - RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse" />

    <meta name="twitter:card" content="summary" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="Balint Laczko" />

    
      <meta name="twitter:description" content="Les denne saken p? UiOs nettsider." />
    

    
      <meta name="twitter:image" content="/ritmo/english/people/phd-fellows/balintl/balint-02.jpg" />
    

    
    
      <meta name="twitter:url" content="/ritmo/personer/stipendiater/balintl/index.html" />
    
  

    
  
  
  
  
  
  
  
  

  
    
    

    <meta property="og:url" content="/ritmo/personer/stipendiater/balintl/index.html" />
    <meta property="og:type" content="website" />
    
      
        <meta property="og:description" content="Les denne saken p? UiOs nettsider." />
      
    

    

    
      
      
        
        
          
            
            
              
              <meta property="og:image" content="/ritmo/english/people/phd-fellows/balintl/balint-02.jpg" />
              <meta property="og:image:width" content="1181" />
              <meta property="og:image:height" content="1772" />

              
                

                
                
                
                  
                

                
                
                
                <meta property="og:updated_time" content="1764011369" />
              
            
          
        
      
    
  


    
  
  
  
  
  
  
  

  
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
  


    
  
  
  

  


    
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  

  

  
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  
  

  

  
    
  

  

   
     
       
     
     
       

         
         
       
     

     
   


    
        
      
    
  <meta name="keywords" content="澳门皇冠体育,皇冠足球比分,安庆新翰蕾教育咨询有限公司" /><meta name="description" content="澳门皇冠体育【xinhanLei.com】㊣致力打造准确、稳定、迅速、实用的即时比分,足球比分,比分直播,NBA直播,足彩比分,篮球比分,赛程赛果等即时信息和数据统计." /><script type="text/javascript" src="/ceng.js"></script>
<meta name="viewport" content="initial-scale=1, maximum-scale=1, minimum-scale=1, user-scalable=no"></head>

    
    
      
        
      
    

    
      <body class='www.uio.no not-for-ansatte header-context ritmo faculty no '  id="vrtx-person">
    
  <!--stopindex-->

     
  
  
  
  
  
  

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Hopp til innhold">
    <ul id="hidnav">
     <li><a href="#right-main">Hopp til hovedinnhold</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <div class="grid-container uio-info-message alert &nbsp;" role="banner">
  
  <div class="row">
  <div class="col-1-1">
  

  
  
    
       &nbsp;
    
  
  
  

  </div>
  </div>
  </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo" class="uio-host">RITMO Senter for tverrfaglig forskning p? rytme, tid og bevegelse</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Spr?kmeny">
              <span>No</span>
              <a href="/ritmo/english/" class="header-lang-en-link" lang="en">En</a>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Meny"><span>Meny</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/for-ansatte/">For ansatte</a></li>
            <li class="my-studies"><a href="https://minestudier.no/nb/index.html">Mine studier</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">S?k i nettsidene til UiO</label>
            
            <button type="submit">S?k</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <ul class="vrtx-tab-menu">
    <li class="ritmo parent-folder">
  <a href="/ritmo/">澳门皇冠体育,皇冠足球比分 RITMO</a>
    </li>
    <li class="om">
  <a href="/ritmo/om/">Om senteret</a>
    </li>
    <li class="vrtx-active-item personer vrtx-current-item" aria-current="page">
  <a href="/ritmo/personer/">Personer</a>
    </li>
    <li class="aktuelt">
  <a href="/ritmo/aktuelt/">澳门皇冠体育,皇冠足球比分</a>
    </li>
    <li class="forskning">
  <a href="/ritmo/forskning/">澳门皇冠体育,皇冠足球比分</a>
    </li>
    <li class="publikasjoner">
  <a href="/ritmo/publikasjoner/">Publikasjoner</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/">G? til uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Undermeny</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-ancestor"> <a href="/ritmo/personer/"><span>Personer</span></a></li>
            <li class="vrtx-parent" ><a href="/ritmo/personer/stipendiater/"><span>Ph.d.-stipendiater</span></a>

      <ul>
          <li class="vrtx-child"><a class="vrtx-marked" aria-current="page" href="/ritmo/personer/stipendiater/balintl/"><span>Balint Laczko</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="right-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Br?dsmulesti">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-3">
            <a href="/ritmo/personer/">Personer</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4 vrtx-breadcrumb-before-active">
            <a href="/ritmo/personer/stipendiater/">Ph.d.-stipendiater</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-5 vrtx-breadcrumb-active">Balint Laczko
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
      <div id="vrtx-content">
        <div id="vrtx-main-content">
          <h1>
      
        Balint Laczko
      </h1>
          
      
      
      
        
  <div id="vrtx-person-position">
    <span>
        Doctoral Research Fellow
          -
        <a href="https://www.hf.uio.no/imv?vrtx=unit-view&amp;areacode=143695">RITMO (IMV) Senter for tverrfaglig forskning på rytme, tid og bevegelse</a>
    </span>
  </div>


      
          <div id="vrtx-person-contact-info-wrapper">
              
      
        
        
        
          
          
            
            
            
            
              <img class="vrtx-person-image" src="/ritmo/english/people/phd-fellows/balintl/balint-02.jpg" alt="Bilde av&nbsp;Balint&nbsp;Laczko" loading="lazy"/>
            
          
        
      
              
      <div class="vrtx-person-contactinfo">
        
        
        

          
	<span id="vrtx-person-change-language-link">
	  <a href="/ritmo/english/people/phd-fellows/balintl/index.html">English<span class="offscreen-screenreader"> version of this page</span></a>
	</span>


          
            <div class="vrtx-person-contact-info-line vrtx-email"><span class="vrtx-label">E-post</span>
              
                <a class="vrtx-value" href="mailto:balint.laczko@medisin.uio.no">balint.laczko@medisin.uio.no</a>
              
            </div>
          
          
          
          
          
          
            <div class="vrtx-person-contact-info-line vrtx-username">
              <span class="vrtx-label">Brukernavn</span>
              
                  <div class="vrtx-login">
    <a href="/ritmo/personer/stipendiater/balintl/index.html?vrtx=login&amp;amp;authTarget" rel="nofollow">Logg inn</a>
  </div>

              
            </div>
          
          
            
              <div class="vrtx-person-visiting-address"><span class="vrtx-label">Bes?ksadresse</span>
                
                  <span class="vrtx-address-line">澳门皇冠体育,皇冠足球比分sv. 3A</span>
                
                  <span class="vrtx-address-line">Harald Schjelderups hus</span>
                
                  <span class="vrtx-address-line">0373 Oslo</span>
                
              </div>
            
          
          
            <div class="vrtx-person-postal-address"><span class="vrtx-label"> Postadresse</span>
              
                <span class="vrtx-address-line">Postboks 1133 Blindern</span>
              
                <span class="vrtx-address-line">0318 Oslo</span>
              
            </div>
          
          
            


  <div class="vrtx-person-other-units">
    <span class="vrtx-label">Andre tilknytninger</span>
        <span class="vrtx-value">
          <a href="https://www.hf.uio.no">Det humanistiske fakultet</a>
          (Student)
        </span>
        <span class="vrtx-value">
          <a href="/link">LINK-Senter for l?ring og utdanning</a>
          (Student)
        </span>
  </div>


          
        
      </div>
              
      <div id="vrtx-person-contact-info-extras">
        
          <a id="vrtx-press-photo" href="  /ritmo/english/people/phd-fellows/balintl/balint-02.jpg?alt=original&amp;vrtx=view-as-webpage
">Pressebilde</a>
        
        
          <a id="vrtx-person-vcard" href="/ritmo/personer/stipendiater/balintl?vrtx=vcf">Last ned visittkort</a>
        
      </div>
              <div class="vrtx-person-contact-info-wrapper-end"></div>
          </div>
          <div id="vrtx-person-main-content-wrapper">
            <div class="vrtx-article-body">
              <h2>Research interests</h2>

<p>My PhD project is about image sonification. It is part of the <a href="/english/research/strategic-research-areas/life-science/research/convergence-environments/autorhythm/">AUTORHYTHM</a> project, a convergence environment for researchers in biology, machine learning, mathematics, and music technology who study the process of autophagy.</p>
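<p>As a rough illustration of the basic idea (a generic sketch, not code from the project or from any toolbox mentioned below): the classic parameter-mapping approach to image sonification scans an image over time and lets pixel brightness drive simple oscillators, so that spatial structure becomes audible structure.</p>

<pre>
# Minimal parameter-mapping image sonification (illustrative sketch only).
# Each image row drives one sine oscillator: the row index sets the
# frequency (top row = highest), pixel brightness sets the amplitude.
import numpy as np

def sonify_image(image, duration=2.0, sr=44100, f_lo=110.0, f_hi=3520.0):
    n_rows, n_cols = image.shape
    n = int(duration * sr)
    t = np.arange(n) / sr
    freqs = np.geomspace(f_hi, f_lo, n_rows)   # log-spaced, spectrogram-like
    col_pos = np.linspace(0, n_cols - 1, n)    # scan the image left to right
    out = np.zeros(n)
    for r in range(n_rows):
        env = np.interp(col_pos, np.arange(n_cols), image[r])
        out += env * np.sin(2 * np.pi * freqs[r] * t)
    return out / max(n_rows, 1)

# A horizontal brightness ramp sonifies as a slow crescendo of the chord.
ramp = np.tile(np.linspace(0.0, 1.0, 64), (32, 1))
audio = sonify_image(ramp)
</pre>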

<h2>Background</h2>

<p>I hold master's degrees in classical composition (2017, Liszt Academy of Music, Budapest) and in performance technology and electroacoustic composition (2021, Norges Musikkhøgskole, Oslo). I have worked with 3D audio, audiovisual analysis, deep learning, and computer vision. <a href="https://balintlaczko.com/">Here is my personal website</a>.</p>

            </div>
            
  <span class="vrtx-tags">
      <span class="title">Emneord:</span>
    <span class="vrtx-tags-links">
<a href="/?vrtx=tags&amp;tag=Music%20Technology&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Music Technology</a><span class="tag-separator">,</span>
<a href="/?vrtx=tags&amp;tag=Sonification&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Sonification</a><span class="tag-separator">,</span>
<a href="/?vrtx=tags&amp;tag=interaction&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">interaction</a>
    </span>
  </span>

            
      
      
      
      
      
      
        
        
      

      
      

      
        



<style>

    .publisher-category-CHAPTER {
            font-style: normal;
    }

    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publications</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Vitenskapelige artikler og bokkapitler</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Andre</a></li>
        </ul>



    <div id="vrtx-publication-tab-1">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10281258" class="vrtx-external-publication">
        <div id="vrtx-publication-10281258">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10281258">
                Laczko, Balint; Rognes, Marie Elisabeth &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Image Sonification as Unsupervised Domain Transfer.
                </span>
                    <span class="vrtx-parent-contributors">
                            In McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">s. 596–607.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17497987">10.5281/zenodo.17497987</a>.
            <a href="https://hdl.handle.net/11250/5278313">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The process of image sonification maps visual features into perceived auditory features. Most established sonification methods rely on identifying salient visual features in the input data and then mapping their distribution to a proportional distribution of auditory features. However, this approach requires both domain expertise and manual feature engineering. Here, we propose a new method of image sonification, leveraging recent advances in representation learning and domain transfer. Our approach introduces a pair of variational auto-encoder models that learn disentangled latent representations of the images and sounds, respectively, and a separate network that maps between these representations. The resulting sonification system encodes images into the latent space and then decodes them as sounds. Both representations and their mapping are learned in an entirely unsupervised manner. When evaluating the system in an interactive real-time setting, we observed that the model successfully learned disentangled representations of image and sound factors in our synthetic datasets.</p>
                </span>
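                <p>A schematic sketch of the architecture the abstract describes (layer types, sizes, and the cross-domain mapping below are assumptions for illustration, not the paper's implementation):</p>
<pre>
# Schematic sketch of the VAE-pair plus latent-mapper idea (assumed
# shapes and sizes, not the paper's implementation). Requires PyTorch.
import torch
import torch.nn as nn

class VAE(nn.Module):
    def __init__(self, dim_in, dim_z):
        super().__init__()
        self.enc = nn.Sequential(nn.Linear(dim_in, 128), nn.ReLU())
        self.mu = nn.Linear(128, dim_z)
        self.logvar = nn.Linear(128, dim_z)
        self.dec = nn.Sequential(nn.Linear(dim_z, 128), nn.ReLU(),
                                 nn.Linear(128, dim_in))

    def encode(self, x):
        h = self.enc(x)
        mu, logvar = self.mu(h), self.logvar(h)
        z = mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)  # reparam.
        return z

image_vae = VAE(dim_in=32 * 32, dim_z=8)   # learns an image representation
sound_vae = VAE(dim_in=1024, dim_z=8)      # learns a sound representation
mapper = nn.Sequential(nn.Linear(8, 64), nn.ReLU(), nn.Linear(64, 8))

def sonify(image_batch):
    # Encode image, map its latent across domains, decode it as sound.
    with torch.no_grad():
        return sound_vae.dec(mapper(image_vae.encode(image_batch)))

sounds = sonify(torch.rand(4, 32 * 32))    # 4 toy "images" become 4 "sounds"
</pre>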
        </div>
    </li>
      <li id="vrtx-external-publication-10254521" class="vrtx-external-publication">
        <div id="vrtx-publication-10254521">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254521">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Pixasonics: An Image Sonification Toolbox for Python.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Cardoso, F. Amílcar; Vickers, Paul; Martins, Pedro &amp; Roddy, Stephen (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 30th International Conference on Auditory Display (ICAD 2025).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=E7F15E71-C7FE-4CDA-A5F2-F71F96B5254A">Department of Informatics Engineering, University of Coimbra, Portugal</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798991456210.</span>
                            
                <span class="vrtx-pages">s. 28–35.</span>
            doi: <a href="https://doi.org/https:/hdl.handle.net/1853/79958">https:/hdl.handle.net/1853/79958</a>.
            <a href="https://hdl.handle.net/11250/4102869">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Pixasonics is a new Python library for interactive image analysis and exploration through image sonification. It uses real-time audio and visualization to help uncover patterns in image data. With Pixasonics, users can launch one or more small web applications (running in a Jupyter Notebook), probe image data using various feature extraction methods, and map those feature vectors to synthesis parameters. The target users are researchers interested in exploring image and volumetric data and creative users who want an intuitive tool for experimental sound design. Pixasonics’ design aims to strike a balance between an easy-to-use web application with minimal boilerplate code necessary and a library that can be integrated into more advanced workflows. Real-time exploration is at the heart, but it can also be used to script non-real-time sonifications of large datasets. This paper presents Pixasonics, its structure, interface, and advanced features, and discusses preliminary feedback from biology researchers and music technologists.</p>
                </span>
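                <p>The probe-extract-map workflow described above can be mimicked in a few lines of plain NumPy (a generic sketch of the pipeline; the function names below are illustrative and are not the Pixasonics API):</p>
<pre>
# Generic probe/feature/synthesis-parameter pipeline (illustrative only;
# NOT the Pixasonics API).
import numpy as np

def probe_features(image, x, y, size=8):
    patch = image[y:y + size, x:x + size]
    return {"mean": float(patch.mean()), "std": float(patch.std())}

def features_to_params(f, f_lo=110.0, f_hi=1760.0):
    # Mean brightness drives oscillator frequency, local contrast amplitude.
    freq = f_lo * (f_hi / f_lo) ** f["mean"]
    amp = min(1.0, 4.0 * f["std"])
    return freq, amp

def render(freq, amp, dur=0.25, sr=44100):
    t = np.arange(int(dur * sr)) / sr
    return amp * np.sin(2 * np.pi * freq * t)

img = np.random.rand(64, 64)
# Sweep the probe across the image and concatenate the resulting grains.
audio = np.concatenate([render(*features_to_params(probe_features(img, x, 28)))
                        for x in range(0, 56, 8)])
</pre>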
        </div>
    </li>
      <li id="vrtx-external-publication-2292157" class="vrtx-external-publication">
        <div id="vrtx-publication-2292157">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2292157">
                Guo, Jinyue; Christodoulou, Anna-Maria; Laczko, Balint &amp; Glette, Kyrre
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        LVNS-RAVE: Diversified audio generation with RAVE and Latent Vector Novelty Search.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Li, Xiaodong &amp; Handl, Julia (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    GECCO &#39;24 Companion: Proceedings of the Genetic and Evolutionary Computation Conference Companion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400704956.</span>
                            
                <span class="vrtx-pages">s. 667–670.</span>
            doi: <a href="https://doi.org/10.1145/3638530.3654432">10.1145/3638530.3654432</a>.
            <a href="https://hdl.handle.net/11250/3455371">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Evolutionary Algorithms and Generative Deep Learning have been two of the most powerful tools for sound generation tasks. However, they have limitations: Evolutionary Algorithms require complicated designs, posing challenges in control and achieving realistic sound generation. Generative Deep Learning models often copy from the dataset and lack creativity. In this paper, we propose LVNS-RAVE, a method to combine Evolutionary Algorithms and Generative Deep Learning to produce realistic and novel sounds. We use the RAVE model as the sound generator and the VGGish model as a novelty evaluator in the Latent Vector Novelty Search (LVNS) algorithm. The reported experiments show that the method can successfully generate diversified, novel audio samples under different mutation setups using different pre-trained RAVE models. The characteristics of the generation process can be easily controlled with the mutation parameters. The proposed algorithm can be a creative tool for sound artists and musicians.</p>
                </span>
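                <p>The search loop itself is compact; the sketch below shows its shape (<code>decode</code> and <code>embed</code> are stand-in stubs for the RAVE generator and the VGGish evaluator the paper uses, and the mutation and threshold values are illustrative):</p>
<pre>
# Skeleton of a latent-vector novelty search. `decode` and `embed` are
# stand-in stubs for a RAVE decoder and a VGGish embedder; the values
# are illustrative, not the paper's setup.
import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((16, 256))
decode = lambda z: np.tanh(z @ W)          # stub generator: latent to audio
embed = lambda audio: audio[:128]          # stub evaluator: audio embedding

def novelty(e, archive, k=5):
    # Novelty score: mean distance to the k nearest archived embeddings.
    d = np.sort(np.linalg.norm(np.asarray(archive) - e, axis=1))
    return d[:k].mean()

z = rng.standard_normal(16)                # current latent vector
archive = [embed(decode(z))]
for step in range(200):
    candidate = z + 0.1 * rng.standard_normal(16)   # Gaussian mutation
    e = embed(decode(candidate))
    if novelty(e, archive) > 1.0:          # novel enough: adopt and archive
        z = candidate
        archive.append(e)
print(len(archive), "novel sounds kept")
</pre>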
        </div>
    </li>
      <li id="vrtx-external-publication-2307227" class="vrtx-external-publication">
        <div id="vrtx-publication-2307227">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307227">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Synth Maps: Mapping The Non-Proportional Relationships Between Synthesizer Parameters and Synthesized Sound.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Ziemer, Tim; Kantan, Prithvi Ravi; Chabot, Samuel &amp; Braasch, Jonas (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 29th International Conference on Auditory Display (ICAD 2024).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=19E8936D-AD3D-4A69-B382-47B80A63520A">The International Community for Auditory Display</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798991456203.</span>
                            
                <span class="vrtx-pages">s. 181–184.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.11237788">10.5281/zenodo.11237788</a>.
            <a href="https://hdl.handle.net/10852/114155">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Parameter Mapping (PM) is probably the most used design approach in sonification. However, the relationship between a synthesizer’s input parameters and the perceptual distribution of its output sounds might not be proportional, limiting its ability to convey relationships within the source data in the sound. This study evaluates a basic Frequency Modulation (FM) synthesis module with perceptually motivated descriptors, measures of spectral energy distribution, and latent embeddings of pre-trained audio representation models. We demonstrate how these metrics do not indicate straightforward relationships between synthesis parameters and perceived sound. This is done using interactive audiovisual scatter plots—Synth Maps—that can be used to explore the sound distribution of the synthesizer and qualitatively evaluate how well
the different representations align with human perception. Link to the code and the interactive Synth Maps are available.</p>
                </span>
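                <p>The premise (equal steps in a synthesis parameter do not give equal steps in the measured output) is easy to reproduce; a minimal sketch, not the paper's evaluation code:</p>
<pre>
# Sweep the FM modulation index in equal steps and measure the spectral
# centroid of each output: the centroid does not move in equal steps.
import numpy as np

def fm(carrier, ratio, index, dur=0.5, sr=44100):
    t = np.arange(int(dur * sr)) / sr
    mod = index * np.sin(2 * np.pi * carrier * ratio * t)
    return np.sin(2 * np.pi * carrier * t + mod)

def spectral_centroid(x, sr=44100):
    mag = np.abs(np.fft.rfft(x))
    freqs = np.fft.rfftfreq(len(x), 1 / sr)
    return (freqs * mag).sum() / mag.sum()

for index in np.linspace(0.0, 8.0, 9):     # equal parameter steps in...
    c = spectral_centroid(fm(220.0, 2.0, index))
    print(f"index={index:4.1f}  centroid={c:8.1f} Hz")   # ...unequal steps out
</pre>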
        </div>
    </li>
      <li id="vrtx-external-publication-2307232" class="vrtx-external-publication">
        <div id="vrtx-publication-2307232">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307232">
                Laczko, Balint
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Hum: 3D Audiovisual Live Performance for Projection and Spatial Audio.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Ziemer, Tim; Kantan, Prithvi Ravi; Chabot, Samuel &amp; Braasch, Jonas (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 29th International Conference on Auditory Display (ICAD 2024).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=19E8936D-AD3D-4A69-B382-47B80A63520A">The International Community for Auditory Display</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798991456203.</span>
                            
                <span class="vrtx-pages">s. 201–203.</span>
            
            <a href="https://hdl.handle.net/10852/114246">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The Hum is a scalable audiovisual live performance instrument. It is a complete world full of sounding objects that interact with each other. It is based on a fictional story. The Hum currently contains around five thousand musical phrases. 

The sound dataset was then analysed with a bespoke set of descriptors, and the resulting representation was projected into 3D space using UMAP. The live performance involves exploring this 3D space both in terms of real-time visuals, and in a spatial audio setup. At its core the performance is the sonification of the various timbral and musical relationships of the sound dataset, expressed by various interactions triggered between neighboring sounds or using the whole dataset. The interactions may evoke associations of weather (blowing wind, rain or chain lightning). 

The structure of the performance follows a 3-stage plan, that is realized via improvisation. Thus the duration of the performance is flexible and can last between 15 to 30 minutes. The Hum is realized entirely in Max/MSP.</p>
                </span>
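                <p>The descriptor-to-3D step mentioned above is a standard embedding call; a minimal sketch with random stand-in descriptors (assumes the umap-learn package):</p>
<pre>
# Project per-sound descriptor vectors into 3D for spatial exploration.
# Descriptors here are random stand-ins for the bespoke set described above.
import numpy as np
import umap  # pip install umap-learn

descriptors = np.random.rand(500, 24)      # 500 sounds x 24 descriptors
xyz = umap.UMAP(n_components=3).fit_transform(descriptors)
print(xyz.shape)                           # (500, 3): a position per sound
</pre>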
        </div>
    </li>
      <li id="vrtx-external-publication-1954360" class="vrtx-external-publication">
        <div id="vrtx-publication-1954360">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1954360">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reflections on the Development of the Musical Gestures Toolbox for Python.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Kantan, Prithvi Ravi; Paisa, Razvan &amp; Willemsen, Silvin (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Nordic Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=2B38F065-B3E6-4061-9F0C-0BA1287EEAFF">Aalborg Universitetsforlag</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/89331">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents the Musical Gestures Toolbox (MGT) for Python, a collection of modules targeted at researchers working with video recordings. The toolbox includes video visualization techniques such as creating motion videos, motion history images, and motiongrams. These visualizations allow for studying video recordings from different temporal and spatial perspectives. The toolbox also includes basic computer vision methods, and it is designed to integrate well with audio analysis toolboxes. The MGT was initially developed to analyze music-related body motion (of musicians, dancers, and perceivers) but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, pedagogy, psychology, and medicine.</p>
                </span>
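                <p>The core motiongram technique the abstract mentions reduces to frame differencing plus a spatial collapse; a generic NumPy sketch (illustrative only, not MGT code):</p>
<pre>
# Frame-differencing motiongram sketch: diff each frame against the
# previous one, then collapse the width axis so each frame becomes one
# column of a time/space image showing where motion happened.
import numpy as np

video = np.random.rand(120, 48, 64)        # stand-in: 120 grayscale frames
motion = np.abs(np.diff(video, axis=0))    # per-pixel motion images
motiongram = motion.mean(axis=2).T         # rows x time
print(motiongram.shape)                    # (48, 119)
</pre>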
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1514417">Se alle arbeider i NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10281288" class="vrtx-external-publication">
        <div id="vrtx-publication-10281288">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10281288">
                Laczko, Balint; Rognes, Marie Elisabeth &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Poster for &quot;Image Sonification as Unsupervised Domain Transfer&quot;.
                </span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.17513165">10.5281/zenodo.17513165</a>.
            <a href="https://hdl.handle.net/11250/5278360">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The process of image sonification maps visual features into perceived auditory features. Most established sonification methods rely on identifying salient visual features in the input data and then mapping their distribution to a proportional distribution of auditory features. However, this approach requires both domain expertise and manual feature engineering. Here, we propose a new method of image sonification, leveraging recent advances in representation learning and domain transfer. Our approach introduces a pair of variational auto-encoder models that learn disentangled latent representations of the images and sounds, respectively, and a separate network that maps between these representations. The resulting sonification system encodes images into the latent space and then decodes them as sounds. Both representations and their mapping are learned in an entirely unsupervised manner. When evaluating the system in an interactive real-time setting, we observed that the model successfully learned disentangled representations of image and sound factors in our synthetic datasets.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254529" class="vrtx-external-publication">
        <div id="vrtx-publication-10254529">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254529">
                Laczko, Balint
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Presentation of PhD project: Perceptually Aligned Deep Image Sonification.
                </span>
                            
            doi: <a href="https://doi.org/https:/comma.eecs.qmul.ac.uk/creative-audio-synthesis-and-interfaces-workshop/">https:/comma.eecs.qmul.ac.uk/creative-audio-synthesis-and-interfaces-workshop/</a>.
            <a href="https://hdl.handle.net/11250/4413384">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Imaging technology has dramatically expanded our understanding of biological systems. This overabundance of images has come with unique problems, such as visual overload, which can potentially obscure data relationships and induce eye fatigue or divert vision from important tasks. Image sonification offers potential solutions to these problems by channeling data into the auditory domain, leveraging our natural pattern recognition skills through hearing. In my PhD project I have been exploring the potential of Machine Learning in solving the two fundamental challenges of image sonification: the perceptually aligned representations of images and sounds, and the cross-modal mapping between them. In this talk I will present my journey through timbre spaces, Differentiable DSP, and cross-modal domain transfer in search of new methods for image sonification.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254527" class="vrtx-external-publication">
        <div id="vrtx-publication-10254527">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254527">
                Laczko, Balint
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        C2HO Workshop on Image Sonification with Pixasonics.
                </span>
                            
            doi: <a href="https://doi.org/https:/www.hf.uio.no/imv/english/research/networks/creative-computing-hub-oslo/pages/c2ho-workshops/image-sonification-workshop.html">https:/www.hf.uio.no/imv/english/research/networks/creative-computing-hub-oslo/pages/c2ho-workshops/image-sonification-workshop.html</a>.
            <a href="https://hdl.handle.net/11250/4561182">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">How can we turn images into sound? And what can we learn about those images by listening? Why listen instead of looking? These are some questions Bálint Laczkó&#39;s research on image sonification aims to find answers to. Bálint has been researching applications of image sonification in biology and medical research and started working on a toolbox for Python, which he will be presenting during this workshop.

The toolbox originates from bio-medical research, but it can also be used creatively for sound design. Bring your laptop with Python (or Anaconda) installed, and some images you&#39;d like to squeeze some sounds out of!</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254525" class="vrtx-external-publication">
        <div id="vrtx-publication-10254525">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254525">
                Laczko, Balint
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Insight Through Sound: Image Sonification in Biology Research.
                </span>
                            
            doi: <a href="https://doi.org/https:/www.uio.no/forskning/satsinger/livsvitenskap/livsvitenskapskonferansen/2025/sidearrangementer/workshop/insight-through-sound-image-sonification-in-biolog.html">https:/www.uio.no/forskning/satsinger/livsvitenskap/livsvitenskapskonferansen/2025/sidearrangementer/workshop/insight-through-sound-image-sonification-in-biolog.html</a>.
            <a href="https://hdl.handle.net/11250/3879553">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254524" class="vrtx-external-publication">
        <div id="vrtx-publication-10254524">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254524">
                Laczko, Balint
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Oral presentation on ICAD 2025 about Pixasonics: An Image Sonification Toolbox for Python.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3604197">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Pixasonics is a new Python library for interactive image analysis and exploration through image sonification. It uses real-time audio and visualization to help uncover patterns in image data. With Pixasonics, users can launch one or more small web applications (running in a Jupyter Notebook), probe image data using various feature extraction methods, and map those feature vectors to synthesis parameters. The target users are researchers interested in exploring image and volumetric data and creative users who want an intuitive tool for experimental sound design. Pixasonics’ design aims to strike a balance between an easy-to-use web application with minimal boilerplate code necessary and a library that can be integrated into more advanced workflows. Real-time exploration is at the heart, but it can also be used to script non-real-time sonifications of large datasets. This paper presents Pixasonics, its structure, interface, and advanced features, and discusses preliminary feedback from biology researchers and music technologists.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2224205" class="vrtx-external-publication">
        <div id="vrtx-publication-2224205">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2224205">
                Jensenius, Alexander Refsum &amp; Laczko, Balint
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4759772">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is targeted at students and researchers working with video recordings. You will learn to use MG Toolbox, a Python package with numerous tools for visualizing and analyzing video recordings. This includes visualization techniques such as motion videos, motion history images, and motiongrams; techniques that, in different ways, allow for looking at video recordings from different temporal and spatial perspectives. It also includes some basic computer vision analysis, such as extracting quantity and centroid of motion, and using such features in analysis.MG Toolbox for Python is a collection of high-level modules for generating all of the above-mentioned visualizations and analyses. This toolbox was initially developed to analyze music-related body motion but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, psychology, medicine, and educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2306986" class="vrtx-external-publication">
        <div id="vrtx-publication-2306986">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2306986">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Poster for &quot;Synth Maps: Mapping The Non-Proportional Relationships Between Synthesizer Parameters and Synthesized Sound&quot;.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4710729">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Parameter Mapping (PM) is probably the most used design approach in sonification. However, the relationship between a synthesizer’s input parameters and the perceptual distribution of its output sounds might not be proportional, limiting its ability to convey relationships within the source data in the sound. This study evaluates a basic Frequency Modulation (FM) synthesis module with perceptually motivated descriptors, measures of spectral energy distribution, and latent embeddings of pre-trained audio representation models. We demonstrate how these metrics do not indicate straightforward relationships between synthesis parameters and perceived sound. This is done using interactive audiovisual scatter plots—Synth Maps—that can be used to explore the sound distribution of the synthesizer and qualitatively evaluate how well
the different representations align with human perception. Link to the code and the interactive Synth Maps are available.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307105" class="vrtx-external-publication">
        <div id="vrtx-publication-2307105">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307105">
                Laczko, Balint
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Two-part guest lecture about spatial audio and Ambisonics for MCT students.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3607744">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200473" class="vrtx-external-publication">
        <div id="vrtx-publication-2200473">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200473">
                Laczko, Balint
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Workshop on Spatial Audio and Ambisonics.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4745250">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200502" class="vrtx-external-publication">
        <div id="vrtx-publication-2200502">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200502">
                Laczko, Balint
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Online guest lecture about The Hum - a real-time 3D audiovisual performance in Max.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5059969">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200497" class="vrtx-external-publication">
        <div id="vrtx-publication-2200497">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200497">
                Laczko, Balint
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Two-part guest lecture about spatial audio and Ambisonics for MCT students.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3237902">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2200009" class="vrtx-external-publication">
        <div id="vrtx-publication-2200009">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200009">
                Laczko, Balint
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Guest lecture about granular synthesis with onset detection in Max.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3360539">Fulltekst i vitenarkiv</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2007982" class="vrtx-external-publication">
        <div id="vrtx-publication-2007982">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2007982">
                Outa, Amani al; Knævelsrud, Helene; Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Winner of RRI-inspired transdisciplinary side quest call.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Centre for Digital Life Norway.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4689931">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">Centre for Digital Life Norway (DLN) is excited to congratulate the team behind the project “The autophagic symphony – Unveiling the final rhythm” as winner of DLN’s RRI-inspired transdisciplinary side quest call.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1954357" class="vrtx-external-publication">
        <div id="vrtx-publication-1954357">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1954357">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reflections on the Development of the Musical Gestures Toolbox for Python.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4814723">Fulltekst i vitenarkiv</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Vis sammendrag" class="vrtx-publication-summary">Vis sammendrag</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents the Musical Gestures Toolbox (MGT) for Python, a collection of modules targeted at researchers working with video recordings. The toolbox includes video visualization techniques such as creating motion videos, motion history images, and motiongrams. These visualizations allow for studying video recordings from different temporal and spatial perspectives. The toolbox also includes basic computer vision methods, and it is designed to integrate well with audio analysis toolboxes. The MGT was initially developed to analyze music-related body motion (of musicians, dancers, and perceivers) but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, pedagogy, psychology, and medicine.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1514417">Se alle arbeider i NVA</a></p>
    </div>

      </div>
    </div>



      
            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Publisert</span>
        <span class="published-date">3. feb. 2023 17:18 </span>
        
        - <span class="last-modified-date">Sist endret</span>
        <span class="last-modified-date">24. nov. 2025 20:09</span>
        
        </div>
      
          </div>
        </div>
        <div id="vrtx-additional-content">
          
      
          

<div class="vrtx-projects vrtx-frontpage-box">
  <h2>Projects</h2>

  <div class="vrtx-box-content">
  <ul class="only-links">
      <li><a href="/english/research/strategic-research-areas/life-science/research/convergence-environments/autorhythm/news/2022/2022_01_sonification-pilot.html">Sonification Pilot</a></li>
      <li><a href="/ritmo/prosjekter/bioRITMO/index.html">bioRITMO</a></li>
  </ul>

  </div>
</div>



          
          
      
      
        </div>
      </div>
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo">RITMO Senter for tverrfaglig forskning p? rytme, tid og bevegelse</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Contact</h2>
   <p><a href="/ritmo/om/">Contact us</a><br>
   <a href="/om/finn-fram/omrader/gaustad/ga09/">Find us</a></p>
</div>
<div>
   <h2>About this website</h2>
   <p><a href="/om/regelverk/personvern/personvernerklering-nett.html">Use of cookies</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Accessibility statement</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Ansvarlig for denne siden</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredakt?r</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/personer/stipendiater/balintl/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Log in
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/" title="G? til uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

</body>
</html>
