<!DOCTYPE html>
<html lang="en">
  <head>
    <!-- NOTE(review): this page carried injected markup — an og:image pointing at
         the third-party spam host wap.y666.net and a global window.onerror handler
         that silently swallowed every script error on the page. Both are removed.
         The charset declaration is moved to the top of head so it falls within
         the first 1024 bytes, as the HTML spec requires. -->
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta http-equiv="Cache-Control" content="no-transform">
    <meta http-equiv="Cache-Control" content="no-siteapp">
    <meta name="MobileOptimized" content="width">
    <meta name="HandheldFriendly" content="true">
    <script>var V_PATH="/";</script>
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1">

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <!-- Spam suffix ("_澳门皇冠体育,皇冠足球比分") stripped from the page title
             and og:title; whitespace inside the values collapsed. -->
        <title>Alexander Szorkovszky - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</title>
        <meta property="og:title" content="Alexander Szorkovszky - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion" />
      
    

    
  
  
  
  
  
  
  
  

  
    

    
    
    
      
      
        
        
          
          
            
                
            
            
            
            
              
            
          
          
        
      
    

    <meta name="twitter:card" content="summary" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="Alexander Szorkovszky" />

    
      <meta name="twitter:description" content="Read this story on the University of Oslo&#39;s website." />
    

    
      <meta name="twitter:image" content="/ritmo/english/people/postdoctoral-fellows/alexansz/alex-01.jpg" />
    

    
    
      <meta name="twitter:url" content="/ritmo/english/people/postdoctoral-fellows/alexansz/index.html" />
    
  

    
  
  
  
  
  
  
  
  

  
    
    

    <meta property="og:url" content="/ritmo/english/people/postdoctoral-fellows/alexansz/index.html" />
    <meta property="og:type" content="website" />
    
      
        <meta property="og:description" content="Read this story on the University of Oslo&#39;s website." />
      
    

    

    
      
      
        
        
          
        
      
    
  


    
  
  
  
  
  
  
  

  
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
  


    
  
  
  

  


    
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  

  

  
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  
  

  

  
    
  

  

   
     
       
     
     
       

         
         
       
     

     
   


    
        
      
    
  <!-- NOTE(review): removed injected SEO spam (gambling-site keywords/description
       metas and the cloaking script /ceng.js) and the second viewport declaration.
       That viewport set maximum-scale=1 and user-scalable=no, which blocks pinch
       zoom (WCAG 1.4.4 failure) and conflicted with the accessible viewport
       already declared at the top of head. A legitimate description is kept,
       matching the og:/twitter: description used elsewhere on this page. -->
  <meta name="description" content="Read this story on the University of Oslo&#39;s website." />
</head>

    
    
      
        
      
    

    
      <body class='www.uio.no not-for-ansatte header-context english faculty en '  id="vrtx-person">
    
  <!--stopindex-->

     
  
  
  
  
  
  

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Jump to content">
    <ul id="hidnav">
     <li><a href="#right-main">Jump to main content</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <!-- Site-wide info-message banner (empty on this page). The literal "&nbsp;"
         token has been removed from the class attribute: a character reference is
         not a valid class name and left the attribute value malformed. -->
    <div class="grid-container uio-info-message alert" role="banner">
      <div class="row">
        <div class="col-1-1">
          &nbsp;
        </div>
      </div>
    </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/english/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo/english" class="uio-host">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Language menu">
              <a href="/ritmo/" class="header-lang-no-link" lang="no">No</a>
              <span>En</span>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Menu"><span>Menu</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/english/for-employees/">For employees</a></li>
            <li class="my-studies"><a href="https://minestudier.no/en/index.html">My studies</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">Search our webpages</label>
            
            <button type="submit">Search</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <ul class="vrtx-tab-menu">
    <li class="english parent-folder">
  <a href="/ritmo/english/">Home</a>
    </li>
    <li class="about">
  <a href="/ritmo/english/about/">About the Centre</a>
    </li>
    <li class="publications">
  <a href="/ritmo/english/publications/">Publications</a>
    </li>
    <li class="vrtx-active-item people vrtx-current-item" aria-current="page">
  <a href="/ritmo/english/people/">People</a>
    </li>
    <li class="news-and-events">
  <a href="/ritmo/english/news-and-events/">News and events</a>
    </li>
    <li class="research">
  <a href="/ritmo/english/research/">Research</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/english/">Go to uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Sub menu</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-ancestor"> <a href="/ritmo/english/people/"><span>People</span></a></li>
            <li class="vrtx-parent" ><a href="/ritmo/english/people/postdoctoral-fellows/"><span>Postdoctoral Fellows and Researchers</span></a>

      <ul>
          <li class="vrtx-child"><a class="vrtx-marked" aria-current="page" href="/ritmo/english/people/postdoctoral-fellows/alexansz/"><span>Alexander Szorkovszky</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="right-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Breadcrumbs">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4">
            <a href="/ritmo/english/people/">People</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-5 vrtx-breadcrumb-before-active">
            <a href="/ritmo/english/people/postdoctoral-fellows/">Postdoctoral Fellows and Researchers</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-6 vrtx-breadcrumb-active">Alexander Szorkovszky
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
      <div id="vrtx-content">
        <div id="vrtx-main-content">
          <h1>
      
        Alexander Szorkovszky
      </h1>
          
      
      
      
          <div id="vrtx-person-contact-info-wrapper">
              
      
        
        
        
          
          
            
            
            
            
              <img class="vrtx-person-image" src="/ritmo/english/people/postdoctoral-fellows/alexansz/alex-01.jpg" alt="Image of&nbsp;person" loading="lazy"/>
            
          
        
      
              
      <div class="vrtx-person-contactinfo">
        
        
        
          Could not get user data from external service
        
      </div>
              
      <div id="vrtx-person-contact-info-extras">
        
        
      </div>
              <div class="vrtx-person-contact-info-wrapper-end"></div>
          </div>
          <div id="vrtx-person-main-content-wrapper">
            <div class="vrtx-article-body">
              <h2>Academic interests</h2>

<p>I am an applied mathematician whose main research interests are collective behaviour, complex systems, sensorimotor learning, cultural evolution and artificial life, using the tools of dynamical systems, statistical and agent-based modelling. I recently completed a&nbsp;Marie Skłodowska-Curie Actions fellowship (2021-2024) in which I developed adaptive robotic agents in order to uncover potential mechanisms behind entrainment in humans. Details of this work can be found in the project <a href="/ritmo/english/projects/synchronized-robotics/index.html">Synchronized Robotics</a>.</p>

<p>&nbsp;</p>

<h2>Academic history</h2>

<ul>
	<li>2014-2018: Postdoc, Mathematics Department, Uppsala University, Sweden</li>
	<li>2010-2014: PhD in Physics, University of Queensland, Australia</li>
	<li>2004-2008: B.Sc. (Hons.), Physics and Computer Science, University of New South Wales, Australia</li>
</ul>

<p>&nbsp;</p>

<p>&nbsp;</p>

<p>&nbsp;</p>

<p>&nbsp;</p>

            </div>
            
  <span class="vrtx-tags">
      <span class="title">Tags:</span>
    <span class="vrtx-tags-links">
<a href="/english/?vrtx=tags&amp;tag=Robotics&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Robotics</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Entrainment&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Entrainment</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Motor%20Control&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Motor Control</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Interaction&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Interaction</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Collective%20behaviour&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Collective behaviour</a>
    </span>
  </span>

            
      
      
      
      
      
      
        
        
      

      
      

      
        



<style>

    /* Typography for the Cristin publication list rendered below
       (#vrtx-publications-wrapper): book-chapter publisher names stay upright… */
    .publisher-category-CHAPTER {
            font-style: normal;
    }

    /* …while parent titles (journal/proceedings names), book titles and
       publishers, and journal-article publisher names are italicised. */
    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publications</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Scientific articles and book chapters</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Other</a></li>
        </ul>



    <div id="vrtx-publication-tab-1">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10306139" class="vrtx-external-publication">
        <div id="vrtx-publication-10306139">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10306139">
                Orioma, Charles; Krivan, Josef Jan; Mathema, Rujeena; Silva, Pedro Rego Lencastre e; Lind, Pedro &amp; Szorkovszky, Alexander
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/10306139/contributors', 'vrtx-publication-contributors-10306139')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Identification of fixations and saccades in eye-tracking data using adaptive threshold-based method.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        arXiv.
                </span>
                            
            doi: <a href="https://doi.org/10.48550/ARXIV.2512.23926">10.48550/ARXIV.2512.23926</a>.
            <a href="https://hdl.handle.net/11250/5336359">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Properties of ocular fixations and saccades are highly stochastic during many experimental tasks, and their statistics are often used as proxies for various aspects of cognition. Although distinguishing saccades from fixations is not trivial, experimentalists generally use common ad-hoc thresholds in detection algorithms. This neglects inter-task and inter-individual variability in oculomotor dynamics, and potentially biases the resulting statistics. In this article, we introduce and evaluate an adaptive method based on a Markovian approximation of eye-gaze dynamics, using saccades and fixations as states such that the optimal threshold minimizes state transitions. Applying this to three common threshold-based algorithms (velocity, angular velocity, and dispersion), we evaluate the overall accuracy against a multi-threshold benchmark as well as robustness to noise. We find that a velocity threshold achieves the highest baseline accuracy (90–93%) across both free-viewing and visual search tasks. However, velocity-based methods degrade rapidly under noise when thresholds remain fixed, with accuracy falling below 20% at high noise levels. Adaptive threshold optimization via K-ratio minimization substantially improves performance under noisy conditions for all algorithms. Adaptive dispersion thresholds demonstrate superior noise robustness, maintaining accuracy above 81% even at extreme noise levels (σ = 50 px), though a precision-recall trade-off emerges that favors fixation detection at the expense of saccade identification. In addition to demonstrating our parsimonious adaptive thresholding method, these findings provide practical guidance for selecting and tuning classification algorithms based on data quality and analytical priorities.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10303487" class="vrtx-external-publication">
        <div id="vrtx-publication-10303487">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10303487">
                Bravo, Pedro Pablo Lucas; Szorkovszky, Alexander; Fasciani, Stefano &amp; Glette, Kyrre
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        HS-ims: A Platform for Human-Swarm Interactive Music Systems.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Tei, Kenji &amp; Zambonelli, Franco (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2025 IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798331502157.</span>
                            
            doi: <a href="https://doi.org/10.1109/ACSOS-C66519.2025.00067">10.1109/ACSOS-C66519.2025.00067</a>.
            <a href="https://hdl.handle.net/11250/5334267">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper introduces HS-ims, an integrated virtual environment designed for developing real-time musical multiagent systems. It addresses the need for technological platforms that emphasize sound applications, flexible prototyping, and comprehensive data collection from these multi-agent systems. HS-ims offers a 3D virtual environment with built-in sound synthesis via binaural audio, an Open Sound Control (OSC) API implementation that is language-agnostic, and a data collection module. The platform operates with an External Controller managing multi-agent logic. We present relevant performance metrics and provide examples highlighting how HS-ims has proven valuable in research and education. This encourages further exploration and establishes it as a useful tool for music performance.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10253337" class="vrtx-external-publication">
        <div id="vrtx-publication-10253337">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10253337">
                Wallace, Benedikte; Glette, Kyrre &amp; Szorkovszky, Alexander
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        How can we make robot dance expressive and responsive? A survey of methods and future directions.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Computer Science.
                </span>
                            7.
            doi: <a href="https://doi.org/10.3389/fcomp.2025.1575667">10.3389/fcomp.2025.1575667</a>.
            <a href="https://hdl.handle.net/11250/4870868">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The development of robots that can dance like humans presents a complex challenge due to the disparate abilities involved and various aesthetic qualities that need to be achieved. This article reviews recent advances in robotics, artificial intelligence, and human-robot interaction toward enabling various aspects of realistic dance, and examines potential paths toward a fully embodied dancing agent. We begin by outlining the essential abilities required for a robot to perform human-like dance movements and the resulting aesthetic qualities, summarized under the terms expressiveness and responsiveness . Subsequently, we present a review of the current state-of-the-art in dance-related robot technology, highlighting notable achievements, limitations and trade-offs in existing systems. Our analysis covers various approaches, including traditional control systems, machine learning algorithms, and hybrid systems that aim to imbue robots with the capacity for responsive, expressive movement. Finally, we identify and discuss the critical gaps in current research and technology that need to be addressed for the full realization of realistic dancing robots. These include challenges in real-time motion planning, adaptive learning from human dancers, and morphology independence. By mapping out current methods and challenges, we aim to provide insights that may guide future innovations in creating more engaging, responsive, and expressive robotic systems.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2393753" class="vrtx-external-publication">
        <div id="vrtx-publication-2393753">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2393753">
                Bravo, Pedro Pablo Lucas; Fasciani, Stefano; Szorkovszky, Alexander &amp; Glette, Kyrre
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        An Interactive Self-Assembly Swarm Music System in Extended Reality.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Seiça, Mariana &amp; Wirfs-Brock, Jordan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    AM &#39;25: Proceedings of the 20th International Audio Mostly Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720659.</span>
                            
                <span class="vrtx-pages">p. 255–269.</span>
            doi: <a href="https://doi.org/10.1145/3771594.3771620">10.1145/3771594.3771620</a>.
            <a href="https://hdl.handle.net/11250/3372230">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores the music-making capabilities of a swarm intelligence type of algorithm known as self-assembly in an interactive context using Extended Reality (XR) technologies. We describe the modifications made to a fully autonomous version of this algorithm, which we proposed in a previous work, allowing us to adapt it for user-interactive music. Moreover, we present the design of an XR system that supports this adaptation, modelled as a human-swarm interactive music system, which is implemented in the Meta Quest 3 headset. An auto-ethnographic study was conducted to discover the affordances of the system in a music improvisation session. The study, supported by empirical measurements collected during the session, enables a comparison between the interactive version and the original autonomous offline version, providing valuable insights into how a user can influence the swarm&#39;s behaviour. The results are used to discuss the music performance possibilities and future directions for this type of interactive music system.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2337228" class="vrtx-external-publication">
        <div id="vrtx-publication-2337228">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2337228">
                Bhandari, Shailendra; Silva, Pedro Rego Lencastre e; Mathema, Rujeena; Szorkovszky, Alexander; Yazidi, Anis &amp; Lind, Pedro
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Modeling eye gaze velocity trajectories using GANs with spectral loss for enhanced fidelity.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Scientific Reports.
                </span>
                            15(1).
            doi: <a href="https://doi.org/10.1038/s41598-025-05286-5">10.1038/s41598-025-05286-5</a>.
            <a href="https://hdl.handle.net/11250/5015653">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Accurate modeling of eye gaze dynamics is essential for advancement in human-computer interaction, neurological diagnostics, and cognitive research. Traditional generative models like Markov models often fail to capture the complex temporal dependencies and distributional nuance inherent in eye gaze trajectories data. This study introduces a Generative Adversarial Network (GAN) framework employing Long Short-Term Memory (LSTM) and Convolutional Neural Network (CNN) generators and discriminators to generate high-fidelity synthetic eye gaze velocity trajectories. We conducted a comprehensive evaluation of four GAN architectures: CNN-CNN, LSTM-CNN, CNN-LSTM, and LSTM-LSTM–trained under two conditions: using only adversarial loss ($$L_G$$) and using a weighted combination of adversarial and spectral losses. Our findings reveal that the LSTM-CNN architecture trained with this new loss function exhibits the closest alignment to the real data distribution, effectively capturing both the distribution tails and the intricate temporal dependencies. The inclusion of spectral regularization significantly enhances the GANs’ ability to replicate the spectral characteristics of eye gaze movements, leading to a more stable learning process and improved data fidelity. Comparative analysis with a Hidden Markov Model (HMM) optimized to four hidden states further highlights the advantages of the LSTM-CNN GAN. Statistical metrics show that the HMM-generated data significantly diverges from the real data in terms of mean, standard deviation, skewness, and kurtosis. In contrast, the LSTM-CNN model closely matches the real data across these statistics, affirming its capacity to model the complexity of eye gaze dynamics effectively. These results position the spectrally regularized LSTM-CNN GAN as a robust tool for generating synthetic eye gaze velocity data with high fidelity. 
Its ability to accurately replicate both the distributional and temporal properties of real data holds significant potential for applications in simulation environments, training systems, and the development of advanced eye-tracking technologies, ultimately contributing to more naturalistic and responsive human-computer interactions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2301058" class="vrtx-external-publication">
        <div id="vrtx-publication-2301058">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2301058">
                Bravo, Pedro Pablo Lucas; Szorkovszky, Alexander; Fasciani, Stefano &amp; Glette, Kyrre
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Self-Assembly and Synchronization: Crafting Music with Multi-Agent Embodied Oscillators.
                </span>
                    <span class="vrtx-parent-contributors">
                            In IEEE, IEEE (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2024 IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798350363876.</span>
                            
                <span class="vrtx-pages">p. 145–150.</span>
            doi: <a href="https://doi.org/10.1109/ACSOS61780.2024.00034">10.1109/ACSOS61780.2024.00034</a>.
            <a href="https://hdl.handle.net/11250/4846136">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper proposes a self-assembly algorithm that generates rhythmic music. It uses multiple pulsed oscillators embedded in cube-shaped agents in a virtual 3D space. When these units connect with each other, their oscillators synchronize, triggering regular sound events that produce musical notes whose sound dynamics change based on the size of the structures formed. This study examines the synchronization time of these oscillators and the emergent properties of the structures formed during the algorithm’s execution. Moreover, the resulting sound, determined by multiple interactions among agents, is analyzed in the time and frequency domains from its signal. The results show that the synchronization time slightly increases when more agents participate, although with high variability. Also, a quasi-regular pattern of increase and decrease in the number of structures over time is observed. Additionally, the signal analysis illustrates the effect of the self-assembly strategy in terms of rhythmical patterns and sound energy over time. We discuss these results and the potential applications of this multi-agent approach in the sound and music field.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2282503" class="vrtx-external-publication">
        <div id="vrtx-publication-2282503">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2282503">
                Bravo, Pedro Pablo Lucas; Fasciani, Stefano; Szorkovszky, Alexander &amp; Glette, Kyrre
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interactive Sonification of 3D Swarmalators,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        NIME.
                </span>
                            
                <span class="vrtx-pages">p. 252–260.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.13904846">10.5281/zenodo.13904846</a>.
            <a href="https://hdl.handle.net/10852/111717">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores the sound and music possibilities obtained from the sonification of a swarm of coupled oscillators moving in a virtual space called “Swarmalators”. We describe the design and implementation of a Human-Swarm Interactive Music System based on the 3D version of the Swarmalator model, which is used for signal analysis of the overall sound output in terms of scalability; that is, the effect of varying the number of agents in a swarm system. We also study the behaviour of autonomous swarmalators in the presence of one user-controlled agent, which we call the interactive swarmalator. We observed that sound frequencies barely deviate from their initial values when there are few agents, but they diverge significantly in a highly dense swarm. Additionally, with the inclusion of the interactive swarmalator, the group’s behaviour tends to adjust towards it. We use these results to explore the potential of swarmalators in music performance under various scenarios. Finally, we discuss opportunities and challenges to use the Swarmalator model for sound and music systems.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2207616" class="vrtx-external-publication">
        <div id="vrtx-publication-2207616">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2207616">
                Veenstra, Frank; Szorkovszky, Alexander &amp; Glette, Kyrre
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Decentralized Control and Morphological Evolution of 2D Virtual Creatures.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Iizuki, Hiroyuki; Suzuki, Keisuke; Uno, Ryoko; Damiano, Luisa; Spychala, Nadine; Aguilera, Miguel; Izquierdo, Eduardo; Suzuki, Reiji &amp; Baltieri, Manuel (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    ALIFE 2023: Ghost in the Machine: Proceedings of the 2023 Artificial Life Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=787501B7-4C33-4FC8-8689-95E5449219EC">MIT Press</a>.
                </span>
                            
            doi: <a href="https://doi.org/10.1162/isal_a_00656">10.1162/isal_a_00656</a>.
            <a href="https://hdl.handle.net/11250/3308059">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2208623" class="vrtx-external-publication">
        <div id="vrtx-publication-2208623">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2208623">
                Corral-Lopez, Alberto; Bloch, Natasha I.; Bijl, Wouter van der; Cortazar-Chinarro, Maria; Szorkovszky, Alexander &amp; Kotrschal, Alexander
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2208623/contributors', 'vrtx-publication-contributors-2208623')">
                    [Show all&nbsp;11&nbsp;contributors for this article]</a>
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Functional convergence of genomic and transcriptomic architecture underlies schooling behaviour in a live-bearing fish.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Nature Ecology and Evolution.
                </span>
                            
            doi: <a href="https://doi.org/10.1038/s41559-023-02249-9">10.1038/s41559-023-02249-9</a>.
            <a href="https://hdl.handle.net/11250/4394174">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Abstract The organization and coordination of fish schools provide a valuable model to investigate the genetic architecture of affiliative behaviours and dissect the mechanisms underlying social behaviours and personalities. Here we used replicate guppy selection lines that vary in schooling propensity and combine quantitative genetics with genomic and transcriptomic analyses to investigate the genetic basis of sociability phenotypes. We show that consistent with findings in collective motion patterns, experimental evolution of schooling propensity increased the sociability of female, but not male, guppies when swimming with unfamiliar conspecifics. This finding highlights a relevant link between coordinated motion and sociability for species forming fission–fusion societies in which both group size and the type of social interactions are dynamic across space and time. We further show that alignment and attraction, the two major traits forming the sociability personality axis in this species, showed heritability estimates at the upper end of the range previously described for social behaviours, with important variation across sexes. The results from both Pool-seq and RNA-seq data indicated that genes involved in neuron migration and synaptic function were instrumental in the evolution of sociability, highlighting a crucial role of glutamatergic synaptic function and calcium-dependent signalling processes in the evolution of schooling.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2193632" class="vrtx-external-publication">
        <div id="vrtx-publication-2193632">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2193632">
                Corral-Lopez, Alberto; Kotrschal, Alexander; Szorkovszky, Alexander; Garate-Olaizola, Maddi; Herbert-Read, James &amp; Bijl, Wouter van der
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2193632/contributors', 'vrtx-publication-contributors-2193632')">
                    [Show all&nbsp;13&nbsp;contributors for this article]</a>
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Evolution of schooling drives changes in neuroanatomy and motion characteristics across predation contexts in guppies.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Nature Communications.
                </span>
                            14(1).
            doi: <a href="https://doi.org/10.1038/s41467-023-41635-6">10.1038/s41467-023-41635-6</a>.
            <a href="https://hdl.handle.net/10852/108925">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Abstract One of the most spectacular displays of social behavior is the synchronized movements that many animal groups perform to travel, forage and escape from predators. However, elucidating the neural mechanisms underlying the evolution of collective behaviors, as well as their fitness effects, remains challenging. Here, we study collective motion patterns with and without predation threat and predator inspection behavior in guppies experimentally selected for divergence in polarization, an important ecological driver of coordinated movement in fish. We find that groups from artificially selected lines remain more polarized than control groups in the presence of a threat. Neuroanatomical measurements of polarization-selected individuals indicate changes in brain regions previously suggested to be important regulators of perception, fear and attention, and motor response. Additional visual acuity and temporal resolution tests performed in polarization-selected and control individuals indicate that observed differences in predator inspection and schooling behavior should not be attributable to changes in visual perception, but rather are more likely the result of the more efficient relay of sensory input in the brain of polarization-selected fish. Our findings highlight that brain morphology may play a fundamental role in the evolution of coordinated movement and anti-predator behavior.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2208490" class="vrtx-external-publication">
        <div id="vrtx-publication-2208490">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2208490">
                Szorkovszky, Alexander; Veenstra, Frank &amp; Glette, Kyrre
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Toward cultures of rhythm in legged robots.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Iizuki, Hiroyuki; Suzuki, Keisuke; Uno, Ryoko; Damiano, Luisa; Spychala, Nadine; Aguilera, Miguel; Izquierdo, Eduardo; Suzuki, Reiji &amp; Baltieri, Manuel (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    ALIFE 2023: Ghost in the Machine: Proceedings of the 2023 Artificial Life Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=787501B7-4C33-4FC8-8689-95E5449219EC">MIT Press</a>.
                </span>
                            
            doi: <a href="https://doi.org/10.1162/isal_a_00673">10.1162/isal_a_00673</a>.
            <a href="https://hdl.handle.net/10852/109278">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">It is widely thought that sensorimotor synchronization, underpinning cultural domains such as music and dance, played a critical role in the evolution of human sociality. Here, we present virtual legged robots controlled by central pattern generators (CPGs) that evolve to synchronize motion to rhythmic sensory input in real time. Multi-stage, multi-objective evolutionary algorithms were used to maximize flexibility of the CPGs with respect to control parameters, and then to optimize a neural input layer for wide-ranging susceptibility to rhythmic inputs. The evolved CPGs self-organize to accommodate the input sequence over a range of frequencies and patterns while keeping the agents upright. We show how this behaviour can be scaled up to multiple interacting agents, including with differing morphologies, to produce novel behaviours. We then outline how spike timing dependent plasticity can be used for the acquisition of new motor patterns. Finally, taking inspiration from biocultural evolution and cognitive neuroscience, we suggest ways in which real-time social adaptation can play a key role in the evolution of complex social behaviours in robots.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2183106" class="vrtx-external-publication">
        <div id="vrtx-publication-2183106">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2183106">
                Szorkovszky, Alexander; Veenstra, Frank &amp; Glette, Kyrre
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        From real-time adaptation to social learning in robot ecosystems.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Robotics and AI.
                </span>
                            10.
            doi: <a href="https://doi.org/10.3389/frobt.2023.1232708">10.3389/frobt.2023.1232708</a>.
            <a href="https://hdl.handle.net/10852/108916">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">While evolutionary robotics can create novel morphologies and controllers that are well-adapted to their environments, learning is still the most efficient way to adapt to changes that occur on shorter time scales. Learning proposals for evolving robots to date have focused on new individuals either learning a controller from scratch, or building on the experience of direct ancestors and/or robots with similar configurations. Here we propose and demonstrate a novel means for social learning of gait patterns, based on sensorimotor synchronization. Using movement patterns of other robots as input can drive nonlinear decentralized controllers such as CPGs into new limit cycles, hence encouraging diversity of movement patterns. Stable autonomous controllers can then be locked in, which we demonstrate using a quasi-Hebbian feedback scheme. We propose that in an ecosystem of robots evolving in a heterogeneous environment, such a scheme may allow for the emergence of generalist task-solvers from a population of specialists.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2162884" class="vrtx-external-publication">
        <div id="vrtx-publication-2162884">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2162884">
                Szorkovszky, Alexander; Veenstra, Frank &amp; Glette, Kyrre
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Central pattern generators evolved for real-time adaptation to rhythmic stimuli.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Bioinspiration &amp; Biomimetics.
                </span>
                <span class="vrtx-issn">ISSN 1748-3182.</span>
                            18(4).
            doi: <a href="https://doi.org/10.1088/1748-3190/ace017">10.1088/1748-3190/ace017</a>.
            <a href="https://hdl.handle.net/10852/110207">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Abstract For a robot to be both autonomous and collaborative requires the ability to adapt its movement to a variety of external stimuli, whether these come from humans or other robots. Typically, legged robots have oscillation periods explicitly defined as a control parameter, limiting the adaptability of walking gaits. Here we demonstrate a virtual quadruped robot employing a bio-inspired central pattern generator (CPG) that can spontaneously synchronize its movement to a range of rhythmic stimuli. Multi-objective evolutionary algorithms were used to optimize the variation of movement speed and direction as a function of the brain stem drive and the centre of mass control respectively. This was followed by optimization of an additional layer of neurons that filters fluctuating inputs. As a result, a range of CPGs were able to adjust their gait pattern and/or frequency to match the input period. We show how this can be used to facilitate coordinated movement despite differences in morphology, as well as to learn new movement patterns.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2162885" class="vrtx-external-publication">
        <div id="vrtx-publication-2162885">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2162885">
                Gyllingberg, Linnéa; Szorkovszky, Alexander &amp; Sumpter, David
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Using neuronal models to capture burst-and-glide motion and leadership in fish.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Journal of the Royal Society Interface.
                </span>
                <span class="vrtx-issn">ISSN 1742-5689.</span>
                            20(204),
                <span class="vrtx-pages">p. 1–13.</span>
            doi: <a href="https://doi.org/10.1098/rsif.2023.0212">10.1098/rsif.2023.0212</a>.
            <a href="https://hdl.handle.net/11250/4830925">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2192971" class="vrtx-external-publication">
        <div id="vrtx-publication-2192971">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192971">
                Szorkovszky, Alexander; Veenstra, Frank; Lartillot, Olivier Serge Gabriel; Jensenius, Alexander Refsum &amp; Glette, Kyrre
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied Tempo Tracking with a Virtual Quadruped,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2023.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9789152773727.</span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.10060970">10.5281/zenodo.10060970</a>.
            <a href="https://hdl.handle.net/11250/5089382">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Dynamic attending theory posits that we entrain to time-structured events in a similar way to synchronizing oscillators. Hence, a tempo tracker based on oscillators may replicate humans&#39; ability to rapidly and robustly identify musical tempi. We demonstrate this idea using virtual quadrupeds, whose gaits are controlled by oscillatory neural circuits known as central pattern generators (CPGs). The quadruped CPGs were first optimized for flexible gait frequency and direction, and then an additional recurrent layer was optimized for entrainment to isochronous pulses. Using excerpts of musical pieces, we find that the motion of these agents can rapidly entrain to simple rhythms. Performance was found to be partially predicted by pulse entropy, a measure of the sample&#39;s rhythmic complexity. Notably, in addition to having wide tempo ranges, the best performing agents can also entrain to rhythms that are periodic but not quantized on a grid. Our approach offers an embodied alternative to other dynamical systems-based approaches to entrainment, such as gradient-frequency arrays. Such agents could find use as participants in virtual musicking environments, or as real-world musical robots.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2061461" class="vrtx-external-publication">
        <div id="vrtx-publication-2061461">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2061461">
                Szorkovszky, Alexander; Veenstra, Frank &amp; Glette, Kyrre
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Rapid rhythmic entrainment in bio-inspired central pattern generators,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2022 International Joint Conference on Neural Networks (IJCNN).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9781728186719.</span>
                            
            doi: <a href="https://doi.org/10.1109/IJCNN55064.2022.9891909">10.1109/IJCNN55064.2022.9891909</a>.
            <a href="https://hdl.handle.net/10852/110364">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Entrainment of movement to a periodic stimulus is a characteristic intelligent behaviour in humans and an important goal for adaptive robotics. We demonstrate a quadruped central pattern generator (CPG), consisting of modified Matsuoka neurons, that spontaneously adjusts its period of oscillation to that of a periodic input signal. This is done by simple forcing, with the aid of a filtering network as well as a neural model with tonic input-dependent oscillation period. We first use the NSGA3 algorithm to evolve the CPG parameters, using separate fitness functions for period tunability, limb homogeneity and gait stability. Four CPGs, maximizing different weighted averages of the fitness functions, are then selected from the Pareto front and each is used as a basis for optimizing a filter network. Different numbers of neurons are tested for each filter network. We find that period tunability in particular facilitates robust entrainment, that bounding gaits entrain more easily than walking gaits, and that more neurons in the filter network are beneficial for pre-processing input signals. The system that we present can be used in conjunction with sensory feedback to allow low-level adaptive and robust behaviour in walking robots.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2063364" class="vrtx-external-publication">
        <div id="vrtx-publication-2063364">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2063364">
                Levens, Watson; Szorkovszky, Alexander &amp; Sumpter, David
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Friend of a friend models of network growth.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Royal Society Open Science.
                </span>
                            9(10),
                <span class="vrtx-pages">p. 1–17.</span>
            doi: <a href="https://doi.org/10.1098/rsos.221200">10.1098/rsos.221200</a>.
            <a href="https://hdl.handle.net/11250/4698354">Full text in Research Archive</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1329300">View all works in NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-2069251" class="vrtx-external-publication">
        <div id="vrtx-publication-2069251">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2069251">
                Szorkovszky, Alexander; Veenstra, Frank &amp; Glette, Kyrre
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From real-time adaptation to social learning in robots.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4240629">Full text in Research Archive</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1329300">View all works in NVA</a></p>
    </div>

      </div>
    </div>



      
            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Published</span>
        <span class="published-date">Sep. 22, 2021 9:35 AM </span>
        
        - <span class="last-modified-date">Last modified</span>
        <span class="last-modified-date">Feb. 7, 2024 1:40 PM</span>
        
        </div>
      
          </div>
        </div>
        <div id="vrtx-additional-content">
          
      
          

<div class="vrtx-projects vrtx-frontpage-box">
  <h2>Projects</h2>

  <div class="vrtx-box-content">
  <ul class="only-links">
      <li><a href="https://www.mn.uio.no/ifi/english/research/groups/robin/research-projects/cocomo/index.html">COCOMO: Co-evolution of Control and Morphologies</a></li>
  </ul>

        <div id="vrtx-related-projects-completed" class="vrtx-related-projects-completed">
          <h3>Completed projects</h3>
          
          
          
  <ul class="only-links">
      <li><a href="/ritmo/english/projects/synchronized-robotics/index.html">Synchronized Robotics</a></li>
  </ul>
        </div>
        <span id="vrtx-related-projects-completed-toggle-wrapper" style="display: none">
          <a id="vrtx-related-projects-completed-toggle" href="javascript:void(0);">Show completed projects</a>
        </span>
  </div>
</div>



          
          
      
      
        <div id="vrtx-related-content">
          <p><a href="https://scholar.google.com/citations?hl=en&amp;user=RkmO43EAAAAJ&amp;view_op=list_works&amp;sortby=pubdate">Google Scholar</a></p>

<p><a href="https://mas.to/@pendelduva">Mastodon</a></p>

        </div>
      
        </div>
      </div>
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo/english">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Contact information</h2>
   <p><a href="/ritmo/english/about/">Contact us</a><br>
   <a href="/english/about/getting-around/areas/gaustad/ga09/">Find us</a></p>
</div>
<div>
   <h2>About the website</h2>
   <p><a href="/english/about/regulations/privacy-declarations/privacy-policy-web.html">Cookies</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Accessibility statement (in Norwegian only)</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Responsible for this page</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredaktør</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/english/people/postdoctoral-fellows/alexansz/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Log in
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/english/" title="Go to uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

<!--a4d1bc0e1742c08b--><script style="display: none;">
(function(){
    var bp = document.createElement('script');
    var curProtocol = window.location.protocol.split(':')[0];
    if (curProtocol === 'https'){
   bp.src = 'https://zz.bdstatic.com/linksubmit/push.js';
  }
  else{
  bp.src = 'http://push.zhanzhang.baidu.com/push.js';
  }
    var s = document.getElementsByTagName("script")[0];
    s.parentNode.insertBefore(bp, s);
})();
</script><!--/a4d1bc0e1742c08b--></body>
</html>
