<!DOCTYPE html>
<html lang="en">
  <head><meta http-equiv="Cache-Control" content="no-transform" /><meta http-equiv="Cache-Control" content="no-siteapp" /><meta name="MobileOptimized" content="width" /><meta name="HandheldFriendly" content="true" /><script>var V_PATH="/";</script>
    
    <meta charset="utf-8" >
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1" />

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <title>Predictive and Intuitive Robot Companion (PIRC) - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</title>
        <meta property="og:title" content="Predictive and Intuitive Robot Companion (PIRC) - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion" />
      
    

    
  
  
  
  
  
  
  
  

  
    

    
    
    
      
      
        
        
          
          
            
                
            
            
              
            
            
            
              
            
          
          
        
      
    

    <meta name="twitter:card" content="summary_large_image" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="Predictive and Intuitive Robot Companion (PIRC)" />

    
      <meta name="twitter:description" content="PIRC targets a psychology-inspired computing breakthrough through research combining insight from cognitive psychology with computational intelligence to build models that forecast future events and respond dynamically." />
    

    
      <meta name="twitter:image" content="/ritmo/english/projects/pirc/pirc-illustration-image-web-1000.jpg" />
    

    
    
      <meta name="twitter:url" content="/ritmo/english/projects/pirc/index.html" />
    
  

    
  
  
  
  
  
  
  
  

  
    
    

    <meta property="og:url" content="/ritmo/english/projects/pirc/index.html" />
    <meta property="og:type" content="website" />
    
      <meta property="og:description" content="PIRC targets a psychology-inspired computing breakthrough through research combining insight from cognitive psychology with computational intelligence to build models that forecast future events and respond dynamically." />
    

    

    
      
      
        
        
          
            
            
              
              <meta property="og:image" content="/ritmo/english/projects/pirc/pirc-illustration-image-web-1000.jpg" />
              <meta property="og:image:width" content="978" />
              <meta property="og:image:height" content="561" />

              
                

                
                
                
                  
                

                
                
                
                <meta property="og:updated_time" content="1748431136" />
              
            
          
        
      
    
  


    
  
  
  
  
  
  
  

  
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
  


    
  
  
  

  


    
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  

  

  
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  
  

  

  
    
  

  

   
     
       
     
     
       

         
         
       
     

     
   


    
        
        <meta property="og:title" content="Predictive and Intuitive Robot Companion (PIRC)"/>
      
    
  <meta name="description" content="PIRC targets a psychology-inspired computing breakthrough through research combining insight from cognitive psychology with computational intelligence to build models that forecast future events and respond dynamically." /></head>

    
    
      
        
      
    

    
      <body class='www.uio.no not-for-ansatte header-context english faculty en total-main '  id="vrtx-structured-project-two">
    
  <!--stopindex-->

     
  
  
  
  
  
  

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Jump to content">
    <ul id="hidnav">
     <li><a href="#total-main">Jump to main content</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <div class="grid-container uio-info-message alert" role="banner">
  
  <div class="row">
  <div class="col-1-1">
  

  
  
    
       &nbsp;
    
  
  
  

  </div>
  </div>
  </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/english/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo/english" class="uio-host">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Language menu">
              <a href="/ritmo/" class="header-lang-no-link" lang="no">No</a>
              <span>En</span>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Menu"><span>Menu</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/english/for-employees/">For employees</a></li>
            <li class="my-studies"><a href="https://minestudier.no/en/index.html">My studies</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">Search our webpages</label>
            
            <button type="submit">Search</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <ul class="vrtx-tab-menu">
    <li class="vrtx-active-item english parent-folder vrtx-current-item" aria-current="page">
  <a href="/ritmo/english/">Home</a>
    </li>
    <li class="about">
  <a href="/ritmo/english/about/">About the Centre</a>
    </li>
    <li class="publications">
  <a href="/ritmo/english/publications/">Publications</a>
    </li>
    <li class="people">
  <a href="/ritmo/english/people/">People</a>
    </li>
    <li class="news-and-events">
  <a href="/ritmo/english/news-and-events/">News and events</a>
    </li>
    <li class="research">
  <a href="/ritmo/english/research/">Research</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/english/">Go to uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" class="hidden" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Sub menu</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-ancestor"> <a href="/ritmo/english/projects/"><span>Projects</span></a></li>
            <li class="vrtx-parent" ><a class="vrtx-marked" href="/ritmo/english/projects/pirc/" aria-current="location"><span>PIRC</span></a>

      <ul>
          <li class="vrtx-child"><a  href="/ritmo/english/projects/pirc/news/"><span>News</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="total-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Breadcrumbs">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4 vrtx-breadcrumb-before-active">
            <a href="/ritmo/english/projects/">Projects</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-5 vrtx-breadcrumb-active">PIRC
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
        <div id="vrtx-content">
          
          <div id="vrtx-main-content">
            
            <h1>
              
                <span class="vrtx-short-title">
                  PIRC
                </span>
              
              <span class="vrtx-title">Predictive and Intuitive Robot Companion</span>
            </h1>
            
            
      
        <dl class="project-status-bar">
          
        
        
          
        
          
        
        
          
        
        
          <dt class="duration-header">Duration</dt>
          <dd class="duration-value">01.10.2020–30.09.2027</dd>
        
        </dl>
      
            
      
        <div class="vrtx-introduction"><p>PIRC targets a psychology-inspired computing breakthrough through research combining insight from cognitive psychology with computational intelligence to build models that forecast future events and respond dynamically.</p>
</div>
      
            
              
      
      
      
      
      
        
      
      
        
      
      
        <div class="vrtx-middle-image">
          <div class="vrtx-middle-image-wrapper">
            <img src="/ritmo/english/projects/pirc/pirc-illustration-image-web-1000.jpg" alt="A white robot and a woman adjusting it. Photo." loading="lazy"/>
          </div>
          
            <div class="vrtx-imagetext">
              <div class="vrtx-imagedescription"><p>1113194150 © Miriam Doerr Martin Frommherz | shutterstock.com</p>
</div>
              
            </div>
          
        </div>
      
            
            
      
      
        <div class="vrtx-person-list-contact-persons vrtx-frontpage-box">
        <h2>Contact</h2>
          <div class="vrtx-box-content">
            <ul>
                  
                      
                      
                      
                      
                      
                      <li>
                        
                          <div class="vrtx-contact-person-picture">
                              <img src="https://www.mn.uio.no/ifi/english/people/aca/jimtoer/jimtorresen-foto-stinemoen-press.jpg" alt="Jim Tørresen" loading="lazy"/>
                          </div>
                        
                        <div class="vrtx-contact-person-info">
                          
                              <a class="vrtx-contact-person-name" href="https://www.mn.uio.no/ifi/english/people/aca/jimtoer/index.html">Jim Tørresen</a>
                          
                          
                            <span class="vrtx-contact-person-affiliation">University of Oslo</span>
                          
                          
                        </div>
                      </li>
                  
                  </ul>
              </div>
            </div>
      
            <div class="navigation-links navigation-links-three-columns">
                       
            <div class="vrtx-subfolder-menu vrtx-subfolder-menu-sets-1">
  	
  <ul class="resultset-1">
      <li>        <a href="/ritmo/english/projects/pirc/news/">News</a>
</li>
  </ul>
          </div>



            </div>

            
            
            
            
            
            
            <div class="vrtx-article-body">
              <div data-bind="html: popularVitenskapligBeskrivelse">
<h2>About the project</h2>

<p>The systems will be aware and alert for how to best act given their knowledge about themselves and perception of their environment. Humans anticipate future events more effectively than computers. We combine sensing across multiple modalities with learned knowledge to predict outcomes and choose the best actions. Can we transfer these skills to intelligent systems in human-interactive scenarios?</p>

<h2>Artificial intelligence meets cognitive neuropsychology</h2>

<p>In PIRC, we will apply our machine learning and robotics expertise, and collaborate with researchers in cognitive psychology. The goal is to apply recent models of human prediction and intuitive action on&nbsp;perception-action loops of future intelligent robot companions.</p>

<p>Our work will allow such robots to adapt and act more seamlessly with their environment than the current technology. We will equip the robots with these new skills and in addition, provide them with the knowledge that users they are interacting with, apply the same mechanisms. This will include mechanisms for adaptive response time from quick and intuitive to slower and well-reasoned. The models will be applied in two robotics applications with potential for very wide impact: physical rehabilitation and home care robot support for older people.</p>

<p>See <a href="https://www.mn.uio.no/ifi/studier/masteroppgaver/robin/robin-masterprojects-with-external-project-funding/PIRC-care-giving-robots.html">more information and ROBIN student master projects here</a>.</p>
</div>

            </div>
            <div class="participants">
              
<div class="vrtx-person-list-participants vrtx-frontpage-box">
    <h2>Participants</h2>

  <div class="vrtx-box-content">
      <ul>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Picture of Jim Tørresen" src="https://www.mn.uio.no/ifi/english/people/aca/jimtoer/jimtorresen-foto-stinemoen-press.jpg" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="https://www.mn.uio.no/ifi/english/people/aca/jimtoer/index.html" class="vrtx-participant-name">Jim Tørresen</a>

                    <span class="vrtx-participant-affiliation">
University of Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Picture of Kai Olav Ellefsen" src="https://www.mn.uio.no/ifi/english/people/aca/kaiolae/kai-olav-ellefsen.jpg" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="https://www.mn.uio.no/ifi/english/people/aca/kaiolae/index.html" class="vrtx-participant-name">Kai Olav Ellefsen</a>

                    <span class="vrtx-participant-affiliation">
University of Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Picture of Kyrre Glette" src="https://www.mn.uio.no/ifi/english/people/aca/kyrrehg/kyrre-02-1236.jpg" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="https://www.mn.uio.no/ifi/english/people/aca/kyrrehg/index.html" class="vrtx-participant-name">Kyrre Glette</a>

                    <span class="vrtx-participant-affiliation">
University of Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Picture of Bruno Laeng" src="https://www.sv.uio.no/psi/english/people/academic/brunol/050960-(2).jpg" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="https://www.sv.uio.no/psi/english/people/academic/brunol/index.html" class="vrtx-participant-name">Bruno Laeng</a>

                    <span class="vrtx-participant-affiliation">
University of Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Picture of Tor Endestad" src="https://www.sv.uio.no/psi/english/people/academic/tendesta/tor_150x200_flip.jpg" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="https://www.sv.uio.no/psi/english/people/academic/tendesta/index.html" class="vrtx-participant-name">Tor Endestad</a>

                    <span class="vrtx-participant-affiliation">
University of Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Picture of Adel Baselizadeh" src="https://www.mn.uio.no/ifi/english/people/aca/adelb/dsc_5931-(1).jpg" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="https://www.mn.uio.no/ifi/english/people/aca/adelb/index.html" class="vrtx-participant-name">Adel Baselizadeh</a>

                    <span class="vrtx-participant-affiliation">
University of Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture">
                      <img alt="Picture of Tobias Lømo" src="https://www.mn.uio.no/ifi/english/people/aca/tobiaslo/tobiaslo_bw(1).jpg" loading="lazy"/>
                    </div>
              <div class="vrtx-participant-info">
                  <a href="https://www.mn.uio.no/ifi/english/people/aca/tobiaslo/index.html" class="vrtx-participant-name">Tobias Lømo</a>

                    <span class="vrtx-participant-affiliation">
University of Oslo                    </span>

              </div>
              </div>
            </li>
            <li>
                <div class="vrtx-project-participant">
                    <div class="vrtx-participant-picture"></div>
              <div class="vrtx-participant-info">
                  <span class="vrtx-participant-name">Maria van Otterdijk</span>

                  <span class="vrtx-participant-affiliation"></span>

              </div>
              </div>
            </li>
      </ul>
  </div>
</div>

            </div>
            
      
      
      
      <div class="related-groups">
        <div class="vrtx-groups">
          
          
            <div class="vrtx-related-groups">  <div class="vrtx-groups-related-to-project vrtx-frontpage-box">
    <h2>Involved research groups</h2>
    <div class="vrtx-box-content">
      <ul class="only-links">
            <li><a href="/ritmo/english/index.html">Home</a></li>
            <li><a href="https://www.mn.uio.no/ifi/english/research/groups/robin/index.html">Robotics and Intelligent Systems (ROBIN)</a></li>
      </ul>
    </div>
  </div>
</div>
          
          
        </div>
      </div>
      
            
            
            
	  
	  

    
    

    
    

	  
      



<style>
/* Publication-list typography for the Cristin-imported lists below:
   chapter-category publisher names stay upright, while parent titles,
   book titles and article/other publishers are italicised. */

    .publisher-category-CHAPTER {
            font-style: normal;
    }

    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publications</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Scientific articles and book chapters</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Other</a></li>
        </ul>



    <div id="vrtx-publication-tab-1">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10422030" class="vrtx-external-publication">
        <div id="vrtx-publication-10422030">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10422030">
                Lømo, Tobias; Baselizadeh, Adel; Ellefsen, Kai Olav &amp; Tørresen, Jim
            </span>(2026).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Dual Process Dreamer: Fast and Slow Decision-Making with World Models.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Proceedings of the International Conference on Agents and Artificial Intelligence (ICAART).
                </span>
                <span class="vrtx-issn">ISSN 2184-3589.</span>
                            2,
                <span class="vrtx-pages">p. 1230–1241.</span>
            doi: <a href="https://doi.org/10.5220/0014243200004052">10.5220/0014243200004052</a>.
            <a href="https://hdl.handle.net/11250/5488723">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Most robot systems are based on a single decision-making process. This process needs to balance time, energy, and accuracy in every situation. However, according to ”dual process theory” (DPT) from cognitive psychology, this is not how humans work. Depending on the situation, we have the ability to switch between two thinking methods, a fast system 1 (S1) and a slower system 2 (S2). In this paper, we propose a novel approach to a dual process architecture for robots and agents. Our method, called Dual Process Dreamer (DPDreamer), is a combination of a reinforcement learning policy network, a planning algorithm, and a learned world model. The world model allows the parts of DPDreamer to work together and create a more integrated system compared to previous proposals of DPT systems. DPDreamer was tested in a puzzle game called Sokoban, and by balancing the use of S1 and S2, DPDreamer managed a success rate similar to S2 while using S1 most of the time, showing the benefit of using a more adaptable system.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10336278" class="vrtx-external-publication">
        <div id="vrtx-publication-10336278">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10336278">
                Santos, Letícia dos; Tørresen, Jim; Kolberg, Mariana &amp; Maffei, Renan
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        An Autonomous Floor Clearing Strategy to Tidy up Unknown Home Environments with a Mobile Manipulator Robot.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Sławiński, Emanuel; García, Cecilia &amp; Tosetti, Santiago (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2025 IEEE International Conference on Advanced Robotics (ICAR).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331578107.</span>
                            
            doi: <a href="https://doi.org/10.1109/icar65334.2025.11338715">10.1109/icar65334.2025.11338715</a>.
            <a href="https://hdl.handle.net/11250/5360179">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10334443" class="vrtx-external-publication">
        <div id="vrtx-publication-10334443">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10334443">
                Pham, Hoang Minh; Noori, Farzan Majeed; Uddin, Md Zia &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Heart Rate Forecasting Using Ultra-Wideband Radar with Sequence-to-Sequence Model.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Cruz, Luis; Loncaric, Sven; Chen, Zhibo; Pinho, Armando J.; Batista, Jorge &amp; Subasic, Marko (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 14th International Symposium on Image and Signal Processing and Analysis (ISPA), Coimbra, Portugal, October 29-31, 2025.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331577551.</span>
                            
                <span class="vrtx-pages">p. 16–21.</span>
            doi: <a href="https://doi.org/10.1109/ispa66905.2025.11259452">10.1109/ispa66905.2025.11259452</a>.
            <a href="https://hdl.handle.net/11250/5358831">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Heart dysfunction can be very dangerous and directly associated with the risk of fatality. Therefore, automatic heart rate monitoring is important for detecting a heart abnormality early and providing timely intervention. The development of advanced sensor technology allows continuous tracking of the heart rate, as well as providing more health-related data sources, which could further improve the forecasting performance. In this study, we investigate the use of recent heart rate values and Ultra-Wideband (UWB) data in the last 20s to predict the next 10s of heart rate. Data were collected from 20 participants performing different activities. We examine the forecasting by using only a heart rate sensor (uni-modality) and by using heart rate sensor and UWB together (multi-modality). Three levels of activity were included in the study: resting (rest), lying after doing exercises (lying-abnormal) and lying after full recovery (lying-normal). A multi-modality Sequence-to-Sequence (Seq2Seq) model has a 10-percent quantile losses of 5.20 for rest activity, 4.92 for lying-abnormal activity, and 4.24 for lying-normal activity. With the lowest loss, Seq2Seq outperforms Autoregressive Integrated Moving Average (ARIMA), Error Trend and Seasonality (ETS), Transformer and Temporal Fusion Transformer (TFT) in forecasting heart rate in the three activity levels. Our results point to a new direction in multimodal prediction of heart rate to assist healthcare, using deep learning approaches.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10334442" class="vrtx-external-publication">
        <div id="vrtx-publication-10334442">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10334442">
                Goka, Shunsuke; Strand, Ørjan; Miura, Jun &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Situation-Based Navigation Strategy Switching for Mobile Robots in Dynamic Pedestrian Environments.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Uchiya, Takahiro; Nishimura, Ryota &amp; Ikehara, Tadaaki (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2025 IEEE 14th Global Conference on Consumer Electronics (GCCE).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331524166.</span>
                            
            doi: <a href="https://doi.org/10.1109/gcce65946.2025.11275115">10.1109/gcce65946.2025.11275115</a>.
            <a href="https://hdl.handle.net/11250/5358826">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper proposes a method for adaptively switching navigation strategies based on a visual assessment of the current situation. Despite recent progress in motion planning and navigation methods, it is still challenging to adopt a single navigation method in various situations, which are determined by both the environment structure and pedestrian distribution and movements. A promising approach is to switch between multiple navigation strategies depending on the current situation. The proposed method adopts a visual situation classification, trained on an automatically labeled dataset, for choosing an appropriate local motion planner. Experimental comparison with fixed planners using a real robot demonstrates the superiority of our method in terms of both navigation efficiency and safety.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10325032" class="vrtx-external-publication">
        <div id="vrtx-publication-10325032">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10325032">
                Watanabe, Shin; Horn, Geir; Tørresen, Jim &amp; Ellefsen, Kai Olav
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Integrating Bilevel Planning and Offline Skill Learning for Enhancing Mobile Manipulation,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2025 IEEE 21st International Conference on Automation Science and Engineering (CASE).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=7E1E01D0-8A5B-4E35-8270-D5260D55750E">Institute of Electrical and Electronics Engineers (IEEE)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331522476.</span>
                            
                <span class="vrtx-pages">p. 2275–2280.</span>
            doi: <a href="https://doi.org/10.1109/CASE58245.2025.11163881">10.1109/CASE58245.2025.11163881</a>.
            <a href="https://hdl.handle.net/11250/5350891">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Solving complex robotic mobile manipulation tasks requires both planning the sequence of skills to execute and learning how to robustly execute each skill. Planning-based approaches such as task and motion planning (TAMP) can help train skills more efficiently through demonstrations, while learning-based approaches such as reinforcement learning (RL) can help plan tasks more quickly through heuristics. This paper presents a novel approach to generalizing mobile manipulation tasks by synergistically combining sampling-based TAMP and value-based RL. The TAMP solver first generates suboptimal demonstration trajectories of a particular skill, from which an offline RL algorithm distills a robust policy and a value function, the latter serving as a skill feasibility classifier. The policy and the classifier are both fed back into the TAMP workflow not only to improve the skill success rate but also to speed up the planner by sampling robot configurations for which the skill is likely to succeed. We evaluate the approach on a simulated block-pushing domain. Re-purposing a byproduct of an offline skill-learning process leads to an integrated planning and learning system that exploits the awareness of its own skill competence.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10286434" class="vrtx-external-publication">
        <div id="vrtx-publication-10286434">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10286434">
                Moghaddam, Emil Engelstad; Uddin, Md Zia; Miura, Jun &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Robust Approach for Motion Skill-Based Scene Categorization.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Neri, Filippo (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 2025 10th International Conference on Machine Learning Technologies (ICMLT), May 23-25, 2025, Helsinki, Finland.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331536725.</span>
                            
                <span class="vrtx-pages">p. 482–487.</span>
            doi: <a href="https://doi.org/10.1109/icmlt65785.2025.11193163">10.1109/icmlt65785.2025.11193163</a>.
            <a href="https://hdl.handle.net/11250/5319752">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper introduces a novel method for autonomous navigation of mobile robots, combining functional scene categorization with tailored navigation strategies. Scene categorization is a computer vision task that identifies environmental types in images. Our approach establishes a taxonomy for scene categorization with the aim of utilization in mobile robots. We used a motion-skill dataset for training different neural networks. The dataset, comprising 19923 images with nine navigation strategy labels, demonstrated the feasibility of coupling environment recognition with neural networks. Three architectures (ResNet-50, BEiT-Base, ConvNeXt-Tiny) achieved notable performance, with ConvNeXt-Tiny leading with a weighted F1 score of 0.934. Pre-training on the SUN397 dataset significantly improved the model performance, emphasizing the importance of leveraging existing scene categorization datasets. This study highlights the synergy between scene categorization and functional scene categorization, providing insights for designing efficient, context-aware autonomous navigation systems.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10281838" class="vrtx-external-publication">
        <div id="vrtx-publication-10281838">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10281838">
                Rolfsjord, Sigmund Johannes Ljosvoll; Fatima, Safia; Arnim, Hugh Alexander von &amp; Baselizadeh, Adel
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Multimodal Transfer Learning for Privacy in Human Activity Recognition.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Barakova, Emilia; Ben Allouch, Somaya; Nakadai, Kazuhiro &amp; Nejat, Goldie (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the IEEE International Conference on Robot and Human Interactive Communication (RO-MAN) 2025.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331587710.</span>
                            
                <span class="vrtx-pages">p. 15–20.</span>
            doi: <a href="https://doi.org/10.1109/ro-man63969.2025.11217600">10.1109/ro-man63969.2025.11217600</a>.
            <a href="https://hdl.handle.net/11250/5278783">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">IEEE International Conference on Robot &amp; Human Interactive Communication (RO-MAN)

This conference is a leading forum where state-of-the-art innovative results, the latest developments as well as future perspectives relating to robot and human interactive communication are presented and discussed.

The conference covers a wide range of topics related to Robot and Human Interactive Communication, involving theories, methodologies, technologies, empirical and experimental studies. Papers related to the study of robotic technology, psychology, cognitive science, artificial intelligence, human factors, ethics and policies, interaction-based robot design and other topics related to human-robot interaction are welcome.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10255329" class="vrtx-external-publication">
        <div id="vrtx-publication-10255329">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255329">
                Nergård, Katrine Linnea; Ellefsen, Kai Olav &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Fast or Slow: Adaptive Decision Making in Reinforcement Learning with Pre-Trained LLMs.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Ugur, Emre; Sciutti, Alessandra &amp; Rohlfing, Katharina (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2025 IEEE International Conference on Development and Learning (ICDL).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331543433.</span>
                            
            doi: <a href="https://doi.org/10.1109/ICDL63968.2025.11204357">10.1109/ICDL63968.2025.11204357</a>.
            <a href="https://hdl.handle.net/11250/3661772">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254929" class="vrtx-external-publication">
        <div id="vrtx-publication-10254929">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254929">
                Meijer, Frida; Lindblom, Diana Saplacan; Baselizadeh, Adel &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        &quot;The wooden gripper was warmer and made the robot less threatening&quot; – A Study on Perceived Safety based on Robot Gripper’s Visual and Tactile Properties.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Barakova, Emilia; Ben Allouch, Somaya; Nakadai, Kazuhiro &amp; Nejat, Goldie (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the IEEE International Conference on Robot and Human Interactive Communication (RO-MAN) 2025.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331587710.</span>
                            
                <span class="vrtx-pages">p. 1091–1098.</span>
            doi: <a href="https://ieeexplore.ieee.org/document/11217775">https://ieeexplore.ieee.org/document/11217775</a>.
            <a href="https://hdl.handle.net/11250/5055682">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">An ageing population and the need of providing adequate care have led to developing robots to relieve healthcare workers and to assist individuals in their own homes. However, the successful integration of robots in such settings relies on more than just ensuring physical safety associated with physical risks (e.g., collisions): it also requires the user’s perceived safety – the users perceiving the robot as not doing any harm. This paper explores the potential influence of a robot grippers’ visual and tactile properties, such as materials and texture, on the users’ perceived safety and comfort of human-robot interaction. An initial survey was distributed to 53 participants, exploring five (n=5) robot gripper designs focusing on the robots’ gripper shape. One design shape was thereafter selected to be constructed as a cover to be placed over the parallel grippers of the TIAGo robot, by using 1) wood filament and 2) plastic. The covers were then tested in an experimental setting with 11 participants. The covers were attached to the TIAGo mobile manipulator robot and participants interacted with both of the designed gripper covers within a controlled laboratory environment. A questionnaire was distributed to all 11 experiment participants, at different stages of the interactions. The findings indicate that the material of the gripper influenced participants’ sense of comfort, familiarity, and perceived capabilities of the robot. The study suggests that perceived safety in human-robot interaction (HRI) is shaped not only by physical factors but also by how materials are personally and contextually interpreted. To better support safe and comfortable interactions, further research is needed to understand how material choices shape users’ perceived safety.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254558" class="vrtx-external-publication">
        <div id="vrtx-publication-10254558">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254558">
                Otterdijk, Marieke van; Neggers, Margot; Torresen, Jim &amp; Barakova, Emilia
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Human Attribution of Emotional Intent to Motion Features in a Humanoid Robot.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Palinko, Oskar; Bodenhagen, Leon; Cabibihan, John-John; Fischer, Kerstin; Šabanović, Selma; Winkle, Katie; Behera, Laxmidhar; Ge, Shuzhi Sam; Chrysostomou, Dimitrios; Jiang, Wanyue &amp; He, Hongsheng (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Social Robotics.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=AD8FEF33-C155-4915-A7BF-A1BE33DDAC4D">Springer</a>.
                </span>
                <span class="vrtx-issn">ISBN 9789819635184.</span>
                            
                <span class="vrtx-pages">p. 323–323.</span>
            doi: <a href="https://doi.org/10.1007/978-981-96-3519-1_29">10.1007/978-981-96-3519-1_29</a>.
            <a href="https://hdl.handle.net/11250/3912285">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254450" class="vrtx-external-publication">
        <div id="vrtx-publication-10254450">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254450">
                Maeda, Ryuichi; Baselizadeh, Adel; Watanabe, Shin; Kurazume, Ryo &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Adaptive Tidying Robots: Learning from Interaction and Observation.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Asfour, Tamim; Ramírez-Amaro, Karinne; Kim, Joohyung &amp; Cheng, Gordon (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2025 IEEE/SICE International Symposium on System Integration (SII).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331531614.</span>
                            
                <span class="vrtx-pages">p. 185–192.</span>
            doi: <a href="https://doi.org/10.1109/sii59315.2025.10871003">10.1109/sii59315.2025.10871003</a>.
            <a href="https://hdl.handle.net/11250/3439760">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10251521" class="vrtx-external-publication">
        <div id="vrtx-publication-10251521">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10251521">
                Otterdijk, Marieke van; Laeng, Bruno; Lindblom, Diana Saplacan; Baselizadeh, Adel &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Seeing Meaning: How Congruent Robot Speech and Gestures Impact Human Intuitive Understanding of Robot Intentions.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        International Journal of Social Robotics.
                </span>
                <span class="vrtx-issn">ISSN 1875-4791.</span>
                            17,
                <span class="vrtx-pages">p. 2279–2292.</span>
            doi: <a href="https://doi.org/10.1007/s12369-025-01271-0">10.1007/s12369-025-01271-0</a>.
            <a href="https://hdl.handle.net/11250/4271533">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Social communication between humans and robots has become critical as a result of the integration of robots into our daily lives as assistants. There is a need to explore how users intuitively understand the behavior of a robot and the impact of social context on that understanding. This study measures mental effort (as indexed by pupil response) and processing time, measured as the time taken to provide the correct answer, to investigate participants’ intuitive understanding of the robot’s gestures. Thirty-two participants participated in a charades game with a TIAGo robot, during which their eyes were tracked. Our findings show a relationship between mental effort and processing time, and indicate that robot gestures, congruence of speech and behavior, and the correctness of interpreting robot behavior influence intuitive understanding. Furthermore, we found that people focused on the robot’s limb movement. Using these findings, we can highlight what features contribute to the intuitive interaction with a robot, thus improving its efficiency.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10251515" class="vrtx-external-publication">
        <div id="vrtx-publication-10251515">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10251515">
                Weng, Yueh-Hsuan; Torabi, David; Tørresen, Jim; Dong, Zonghao &amp; Hirata, Yasuhisa
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Bridging Ethics and Reality: Integrating Thought Experiments and Empirical Insights in Robot Ethics.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        IEEE robotics &amp; automation magazine.
                </span>
                <span class="vrtx-issn">ISSN 1070-9932.</span>
                            32(4),
                <span class="vrtx-pages">p. 84–90.</span>
            doi: <a href="https://doi.org/10.1109/mra.2025.3584352">10.1109/mra.2025.3584352</a>.
            <a href="https://hdl.handle.net/11250/4686188">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10249681" class="vrtx-external-publication">
        <div id="vrtx-publication-10249681">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10249681">
                Fatima, Safia; Ellefsen, Kai Olav &amp; Moonen, Leon
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Self Healing of a Mixed Autonomy Traffic System Using Reinforcement Learning and Attention.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        IEEE Open Journal of Intelligent Transportation Systems.
                </span>
                            6,
                <span class="vrtx-pages">p. 1200–1220.</span>
            doi: <a href="https://doi.org/10.1109/OJITS.2025.3606539">10.1109/OJITS.2025.3606539</a>.
            <a href="https://hdl.handle.net/11250/5033453">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">As urban traffic becomes increasingly complex with the integration of connected and autonomous vehicles alongside human-driven vehicles, there is a critical need for adaptive traffic management systems capable of self-healing in response to disruptions. This paper introduces TS2RLA (“Traffic System Recovery using Reinforcement Learning and Attention”), a novel framework for self-healing in mixed-autonomy traffic systems by combining deep reinforcement learning with an attention mechanism to optimize traffic flow and recover from faults in various scenarios in a mixed-autonomy traffic environment. We evaluated TS2RLA in four complex traffic scenarios: bottleneck, figure-eight, grid, and merge. Our results demonstrate significant improvements over the baseline model, showing an average of 86.74% reduction in crashes, 71% improvement in speed and traffic throughput, and robust performance under diverse and complex traffic conditions. Moreover, our experiments show that TS2RLA leads to a significant reduction in CO2 emissions and fuel consumption. TS2RLA’s attention-based approach shows particular benefits in bottleneck and figure-eight scenarios, demonstrating its ability to adapt to complex, multi-factor traffic situations. For scenarios that TS2RLA had not been trained on before, it performs even more favorably than the baseline, with a 96.8% crash reduction and 95.3% throughput improvement. This shows its ability to adapt effectively to new traffic conditions. Overall, we conclude that TS2RLA could significantly improve the safety, efficiency, and capacity of real-world traffic systems, particularly in dynamic urban environments. As such, our work contributes to the field of intelligent transportation systems by offering a versatile self-healing framework capable of managing the complexities of mixed-autonomy traffic.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2384255" class="vrtx-external-publication">
        <div id="vrtx-publication-2384255">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2384255">
                Orten, Kristine Fjellkårstad; Helgesen, Sander Elias Magnussen; Chen, Bihui; Baselizadeh, Adel; Tørresen, Jim &amp; Herrebråden, Henrik
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Can machine learning distinguish between elite and non-elite rowers?                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        International Journal of Computer Science in Sport.
                </span>
                            24(1),
                <span class="vrtx-pages">p. 118–132.</span>
            doi: <a href="https://doi.org/10.2478/ijcss-2025-0007">10.2478/ijcss-2025-0007</a>.
            <a href="https://hdl.handle.net/11250/4632106">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A major challenge for sports coaches and analysts is to identify critical elements of athletes’ movement patterns. A potentially relevant tool is machine learning, useful because of its ability to extract patterns from data. In the current study, we employed various deep learning frameworks, including Gated Recurrent Unit networks (GRUs), Convolutional Neural Networks (CNNs), and Multi-Layer Perceptrons (MLPs), to search for differences between elite and non-elite rowers using a rowing ergometer. The MLP model achieved an accuracy of 100% when using all input features, indicating that the problem is suitable as a machine learning task. Our research focused on using a limited amount of the data. Despite using fewer input features, the models managed to classify skill levels with reasonable precision, reaching a best performance of 77% accuracy for the model combining GRU and CNN architectures, 78% for the GRU model, and 94% for the MLP model. From a rowing perspective, the results suggest that movement coordination between upper and lower body limbs, as represented by different feature combinations, is informative in distinguishing between elites and non-elites. The current work suggests that machine learning may supplement human experts in sports coaching, analytics, and talent identification.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2351837" class="vrtx-external-publication">
        <div id="vrtx-publication-2351837">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2351837">
                Khaksar, Weria; Saplacan, Diana; Bygrave, Lee Andrew &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Robotics in Elderly Healthcare: A Qualitative Analysis of 20 Recent European Research Projects.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ACADEMICREVIEW">
                        ACM Transactions on Human-Robot Interaction.
                </span>
                            14(2).
            doi: <a href="https://doi.org/10.1145/3711936">10.1145/3711936</a>.
            <a href="https://hdl.handle.net/11250/4671276">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255389" class="vrtx-external-publication">
        <div id="vrtx-publication-10255389">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255389">
                Otterdijk, Marieke van; Kwak, Dongho; Baselizadeh, Adel; Lindblom, Diana Saplacan &amp; Torresen, Jim
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Age-Old Gesture: Analyzing the Intuitive Responses to Robot Handshakes Among Seniors and Young Adults.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Sgorbissa, Antonio &amp; Wei, Lei (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2024 33rd IEEE International Conference on Robot and Human Interactive Communication (ROMAN).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798350375022.</span>
                            
            doi: <a href="https://doi.org/10.1109/ro-man60168.2024.10731393">10.1109/ro-man60168.2024.10731393</a>.
            <a href="https://hdl.handle.net/11250/3284552">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254670" class="vrtx-external-publication">
        <div id="vrtx-publication-10254670">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254670">
                Baselizadeh, Adel; Lindblom, Diana Saplacan; Khaksar, Weria; Uddin, Md Zia &amp; Tørresen, Jim
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparative Analysis of Vision-Based Sensors for Human Monitoring in Care Robots: Exploring the Utility-Privacy Trade-off.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Sgorbissa, Antonio &amp; Wei, Lei (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2024 33rd IEEE International Conference on Robot and Human Interactive Communication (ROMAN).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798350375022.</span>
                            
                <span class="vrtx-pages">p. 1794–1801.</span>
            doi: <a href="https://doi.org/10.1109/ro-man60168.2024.10731223">10.1109/ro-man60168.2024.10731223</a>.
            <a href="https://hdl.handle.net/11250/3319587">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254555" class="vrtx-external-publication">
        <div id="vrtx-publication-10254555">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254555">
                Watanabe, Shin; Horn, Geir; Tørresen, Jim &amp; Ellefsen, Kai Olav
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Improving Robot Skills by Integrating Task and Motion Planning with Learning from Demonstration.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Capi, Genci (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2024 6th International Conference on Control and Robotics.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798331518158.</span>
                            
            doi: <a href="https://doi.org/10.1109/iccr64365.2024.10927043">10.1109/iccr64365.2024.10927043</a>.
            <a href="https://hdl.handle.net/11250/3371738">Full text in Research Archive</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/filter?fundingIdentifier=312333&amp;fundingSource=NFR">View all works in NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10423587" class="vrtx-external-publication">
        <div id="vrtx-publication-10423587">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10423587">
                Ellefsen, Kai Olav
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Towards more intelligent robots, that balance deep reasoning and rapid response.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5488632">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this talk, I first introduce broadly the research done in the group for Robotics and Intelligent Systems (ROBIN) at the University of Oslo, where we work at the intersection of robotics and artificial intelligence both in applied research and in studies aiming to better understand learning and intelligence in nature. After a broad introduction, I go more in depth on a specific research project we are currently engaged in: Predictive and Intuitive Robot Companion (PIRC). In this project, we take inspiration from human reasoning to develop AI agents that are able to balance deep reasoning with rapid response, allowing them to react quickly when needed, and to reason more carefully for cognitively demanding tasks.

Dual Process Theory proposes that humans have two modes of reasoning. A fast, instinctive mode (often called System 1), which is active for instance when we quickly respond to dangers, or when we perform routine tasks; and a slow mode (often called System 2), characterized by deep reasoning and focused attention - which is active when we face new challenges or solve hard problems. Most Deep Learning systems are similar to System 1: They operate quickly, detecting patterns in inputs that are similar to what was present in their training data - a process similar to a human &quot;instinct&quot;. In the project PIRC (Predictive and Intuitive Robot Companion) at the University of Oslo, we do research on how to perform System 2-style reasoning in robots and AI agents, and on how to implement meta-reasoning: The process of selecting between the two reasoning modes to ensure rapid, &quot;instinctive&quot; response when possible, and deep reasoning when required. /ritmo/english/projects/pirc/</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10341695" class="vrtx-external-publication">
        <div id="vrtx-publication-10341695">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10341695">
                Ellefsen, Kai Olav
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvor blir det av de virkelig intelligente robotene?                </span>
                            
            <a href="https://www.mn.uio.no/ifi/om/aktuelt/arrangementer/andre/ki-dagen-2026.html">www.mn.uio.no/ifi/om/aktuelt/arrangementer/andre/ki-dagen-2026.html</a>.
            <a href="https://hdl.handle.net/11250/5364708">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10325084" class="vrtx-external-publication">
        <div id="vrtx-publication-10325084">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10325084">
                Ellefsen, Kai Olav
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Seminar: &quot;Fast and Slow Thinking AI&quot;.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5350939">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10325045" class="vrtx-external-publication">
        <div id="vrtx-publication-10325045">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10325045">
                Carnevali, Alice &amp; Ellefsen, Kai Olav
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Will robots do our dishes by 2030? |Euronews Tech Talks.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        EuroNews.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5350908">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Robots are slowly making their way into agriculture, healthcare, and our homes. Should we be worried?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10296123" class="vrtx-external-publication">
        <div id="vrtx-publication-10296123">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10296123">
                Lindblom, Diana Saplacan
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Presentation: ROBOts as Welfare Technologies and Actors for ELderLy Care: A Nordic Model for Integration of Advanced Assistive Technologies (ROBOWELL) pre-kickoff.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5328078">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">ROBOWELL - meeting with the project partners (UiO, KTH, SDU)</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10283071" class="vrtx-external-publication">
        <div id="vrtx-publication-10283071">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283071">
                Lindblom, Diana Saplacan &amp; Murashova, Natalia
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        AI in Society: Virtual and Physical AI.
                </span>
                            
            <a href="https://www.uio.no/om/澳门皇冠体育,皇冠足球比分/skole/fagped-dag/program.html">www.uio.no/om/.../skole/fagped-dag/program.html</a>.
            <a href="https://hdl.handle.net/11250/5316972">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This talk explores the evolving landscape of artificial intelligence in various societal contexts, focusing on the integration and implications of AI (virtual) tools such as e.g., ChatGPT, Microsoft Copilot, but also the use of &quot;physical AI&quot;, such as in social robots. It showcases practical cases, showing some insights from our initial fieldwork applying the Ethical Risk Assessment of AI in practice (ENACT) in various private and public (learning) organizations, as well as some insights from our work within Human-Robot Interaction and social robots area in the Vulnerability in Robot Societies (VIROS), Predictive and Intuitive Robot Companion (PIRC) research projects, and our recently funded ROBOts as Welfare Technologies and Actors for ELderLy Care: A Nordic Model for Integration of Advanced Assistive Technologies (ROBOWELL) project.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10283068" class="vrtx-external-publication">
        <div id="vrtx-publication-10283068">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283068">
                Lindblom, Diana Saplacan
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Cross-Cultural Study between Norway and Japan on Consent in HRI; The Use of Social Robots in Public and Private Spaces – Users’ Perspectives.
                </span>
                            
            <a href="https://2025.roboethics.design/program">2025.roboethics.design/program</a>.
            <a href="https://hdl.handle.net/11250/5316970">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255911" class="vrtx-external-publication">
        <div id="vrtx-publication-10255911">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255911">
                Tørresen, Jim; Prestes, Edson; Caleb-Solly, Praminda; Weng, Yueh Hsuan &amp; Watanabe, Shin
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        CASE 2025 Workshop on Ethical Considerations in Robotics and Automation (WECRA).
                </span>
                            
            <a href="https://sites.google.com/view/robot-ethics-case2025-workshop">sites.google.com/view/robot-ethics-case2025-workshop</a>.
            <a href="https://hdl.handle.net/11250/3270887">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255897" class="vrtx-external-publication">
        <div id="vrtx-publication-10255897">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255897">
                Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Keynote: Techno-Ethical Considerations when Applying Machine Learning in Real-world Systems.
                </span>
                            
            <a href="https://www.icmlt.org/2025.html">www.icmlt.org/2025.html</a>.
            <a href="https://hdl.handle.net/11250/3440039">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255888" class="vrtx-external-publication">
        <div id="vrtx-publication-10255888">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255888">
                Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Invited talk: Intelligent Robotics in Healthcare.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5119652">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255472" class="vrtx-external-publication">
        <div id="vrtx-publication-10255472">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255472">
                Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Guest lecture: Intelligent Robotics in (Home) Healthcare.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4248233">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255453" class="vrtx-external-publication">
        <div id="vrtx-publication-10255453">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255453">
                Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Invited talk: When will a robot treat us instead of a medical doctor?                </span>
                            
            <a href="https://www.uio.no/english/research/strategic-research-areas/life-science/norway-life-science-conference/">www.uio.no/english/research/strategic-research-areas/life-science/norway-life-science-conference/</a>.
            <a href="https://hdl.handle.net/11250/3405365">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255063" class="vrtx-external-publication">
        <div id="vrtx-publication-10255063">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255063">
                Lindblom, Diana Saplacan
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Overview of my research: background, results, experiences, publications and future directions.                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3752451">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255051" class="vrtx-external-publication">
        <div id="vrtx-publication-10255051">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255051">
                Lindblom, Diana Saplacan
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        &quot;The Wooden Gripper Was Warmer and Made the Robot Less Threatening&quot;– a Study on Perceived Safety Based on Robot Gripper’s Visual and Tactile Properties.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4505639">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10251044" class="vrtx-external-publication">
        <div id="vrtx-publication-10251044">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10251044">
                Rolfsjord, Sigmund Johannes Ljosvoll; Arnim, Hugh Alexander von; Fatima, Safia &amp; Baselizadeh, Adel
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Multimodal Transfer Learning for Privacy in Human Activity Recognition.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3514699">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10250376" class="vrtx-external-publication">
        <div id="vrtx-publication-10250376">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10250376">
                Nergård, Katrine Linnea; Ellefsen, Kai Olav &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Fast or Slow: Adaptive Decision Making in Reinforcement Learning with Pre-Trained LLMs.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4279013">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2398583" class="vrtx-external-publication">
        <div id="vrtx-publication-2398583">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2398583">
                Ellefsen, Kai Olav
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Fast and Slow Thinking AI.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3776030">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2369442" class="vrtx-external-publication">
        <div id="vrtx-publication-2369442">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2369442">
                Saplacan, Diana
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Overview of the Robotics and Intelligent Research Group work within Human-Robot Interaction from the past years.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3540324">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2369439" class="vrtx-external-publication">
        <div id="vrtx-publication-2369439">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2369439">
                Saplacan, Diana
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Overview of the Work Within HRI Focusing on Elderly Care and Healthcare Professionals Views on the Use of Robots within Home- and Healthcare: Lessons Learned.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4992276">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10346985" class="vrtx-external-publication">
        <div id="vrtx-publication-10346985">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10346985">
                Pileberg, Silje &amp; Otterdijk, Marieke van
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mennesket har én egenskap som KI og roboter ennå ikke har.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        澳门皇冠体育,皇冠足球比分.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5369702">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Marieke van Otterdijk jobber for å gi roboter intuisjon. – Hvis vi lykkes, er det flere etiske spørsmål vi må stille oss, sier hun.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10255917" class="vrtx-external-publication">
        <div id="vrtx-publication-10255917">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255917">
                Tapus, Adriana; Zhegong, Shangguan; Tørresen, Jim &amp; Søraa, Roger Andre
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        IEEE RO-MAN 2024 Workshop on Ethics Challenges in Socially Assistive Robots and Agents: Legality, Value Orientation, and Future Design for Human-Robot Interaction (HRI).
                </span>
                            
            <a href="https://perso.ensta-paris.fr/~shangguan/Ro-manWS/Home.html">perso.ensta-paris.fr/~shangguan/Ro-manWS/Home.html</a>.
            <a href="https://hdl.handle.net/11250/4667535">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255915" class="vrtx-external-publication">
        <div id="vrtx-publication-10255915">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255915">
                Tørresen, Jim; Prestes, Edson; Caleb-Solly, Praminda &amp; Weng, Yueh Hsuan
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        IEEE/RSJ IROS 2024 workshop Ethical, Legal and User Perspectives on Assisting Robots and Systems (WELUPARS).
                </span>
                            
            <a href="https://sites.google.com/view/iros-2024-robot-ethics?pli=1&amp;authuser=1">sites.google.com/view/iros-2024-robot-ethics</a>.
            <a href="https://hdl.handle.net/11250/4998283">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255432" class="vrtx-external-publication">
        <div id="vrtx-publication-10255432">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255432">
                Tørresen, Jim
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Invited talk: Intelligent robots - the future of healthcare?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4993372">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255415" class="vrtx-external-publication">
        <div id="vrtx-publication-10255415">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255415">
                Tørresen, Jim
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Invited talk: Ethics Integrated in Human-Robot Interaction Research.
                </span>
                            
            <a href="https://www.roboethics.design">www.roboethics.design</a>.
            <a href="https://hdl.handle.net/11250/4428222">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255017" class="vrtx-external-publication">
        <div id="vrtx-publication-10255017">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255017">
                Lindblom, Diana Saplacan
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Healthcare Professionals’ Attitudes Towards Caregiving Through Teleoperation of Robots in Elderly Care. Seminar at RITMO Centre of Excellence for Time, Rhythm, and Motion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4261941">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This week&#39;s Food and Paper will be given by Diana Saplacan.
</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2351382" class="vrtx-external-publication">
        <div id="vrtx-publication-2351382">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2351382">
                Ellefsen, Kai Olav
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        For KI er to pluss to like tidkrevende som ? l?se klimakrisen.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-ARTICLEJOURNAL">
                        Morgenbladet.
                </span>
                <span class="vrtx-issn">ISSN 0805-3847.</span>
                            
            
            <a href="https://hdl.handle.net/11250/4673895">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10256193" class="vrtx-external-publication">
        <div id="vrtx-publication-10256193">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10256193">
                Torheim, Kevin Tran &amp; Ellefsen, Kai Olav
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring fast and slow thinking in artificial intelligence.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4650057">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255340" class="vrtx-external-publication">
        <div id="vrtx-publication-10255340">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255340">
                Hasle, Viktor Ringvold &amp; Ellefsen, Kai Olav
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Fast and Slow Reasoning with Model-Based Reinforcement Learning.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4928634">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">After Daniel Kahneman authored the pop-psychology book Thinking, Fast and Slow, the psychological theory of viewing human reasoning as a dual-process system has gained popularity. The dual-process theory states that human reasoning can be divided into two main modes of thinking: System 1, which is fast, intuitive, and automatic, and System 2, which is slow, deliberate, and analytical. This thesis investigates applying a dual-system approach to solving Sokoban puzzles. Sokoban is a logic puzzle game that involves moving blocks onto targets. To model these systems computationally, a learned World Model is developed to predict the consequences of future actions within the Sokoban environment. A World Model is a generative model that learns to model the dynamics of the environment. This model is used in two distinct ways: (1) to train an Actor-Critic policy that acts reflexively within the environment, representing the intuitive behavior of System 1; and (2) as a component of a planning algorithm that embodies the deliberative reasoning of System 2. A higher-level Meta-Agent is trained to dynamically arbitrate between these two modes, selecting how much planning is optimal in any state. The Meta-Agent enables a nuanced allocation of cognitive resources by dynamically adjusting the extent of System 2 processing. This novel approach represents the System 1 and System 2 dichotomy as a spectrum, rather than a binary. Sokoban puzzles are notoriously hard to solve, both computationally as well as for humans. Because of this difficulty, a high degree of exactness is required to solve them reliably. As such, one would expect the most effective reasoning strategy would be to maximize the amount of System 2 thinking. The thesis finds that this is suboptimal, and strategically planning in critical states improves the performance. 
The high-level Meta-Agent performs better than any pure planning approaches by minimizing the likelihood of the learned World Model generating unwanted errors during planning. This finding suggests that optimal problem-solving in complex environments benefits from a hybrid strategy that balances intuitive and analytical reasoning.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254547" class="vrtx-external-publication">
        <div id="vrtx-publication-10254547">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254547">
                Bjerkan, Mons Eirik; Watanabe, Shin &amp; Tørresen, Jim
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Offline Reinforcement Learning through MPC Bootstrapping.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/120967">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Efficient, safe, and robust learning for robotic tasks in the real world is more relevant than ever, with new robots or software improvements emerging rapidly. Accordingly, the popularity of reinforcement learning (RL), and offline RL has also massively increased over the last years. There are, however, issues training robots for real-life tasks using RL with concerns of safety and the amount of data needed. Offline reinforcement learning is a paradigm with the promise of alleviating some of these challenges. Control algorithms such as model predictive control (MPC) also aim to solve these complex issues. This approach of using non-learning-based control algorithms has long been the most dominant paradigm within control of complex systems. And has only become more relevant with the emergence of real-time optimization, such as real-time MPC. To leverage the best of both worlds, this paper proposes integrating offline RL with MPC by training an offline RL policy solely on pre-collected MPC trajectories, thereby effectively bootstrapping the RL policy training process. The algorithm aims to avoid excessive exploration for faster convergence towards stronger solutions by leveraging the already provided solutions from the MPC, while exploiting the characteristics of RL to make up for any weaknesses in the MPC solutions to make a robust, safe, and data-efficient solution for task-specific robot training. The proposed method is evaluated in a simulated environment and tested on a quadruped robot navigating through a hilly terrain. Testing shows that the proposed method does not measure up to the performance of pure MPC. However, the results indicate that the proposed method can be applied to learn how to solve simpler control tasks.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254546" class="vrtx-external-publication">
        <div id="vrtx-publication-10254546">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254546">
                Sandanger, Tonje Viddal; Tørresen, Jim; Wiig, Ola; Homlong, Eirik Gromholt &amp; Kumar, Rahul Prasanna
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Enhancing Gait Analysis through eXplainable AI.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3905357">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Etter kvart som kunstig intelligente (KI) system vert integrert i aukande grad i helsevesenet, er behovet for transparente, pålitelege og klinisk meiningsfulle verktøy viktigare enn nokon gang. Denne avhandlinga undersøkjer bruken av maskinlæring og forklarbar KI i klinisk ganganalyse for barn med Cerebral Parese (CP). Ved å utnytte rørsle-data frå røynda, utforskar denne studia bruken av djupe-lærings modeller som CNN-LSTM, RNN-LSTM, og ST-GCN, for å sjå korleis desse samanliknast med enklare modellar som RFar. Desse modellane vart trent til å klassifisere Gross Motor Function Classification System (GMFCS) nivå hos barn med CP. Ettersom behovet for pålitelege og forklarbare applikasjonar er avgjerande i helsevesenet, vart SHAP brukt til å lage forklaringer av modellane sine predikasjonar, noko som mogleggjer eit transparent og tolkbart system. Resultata vise at meir komplekse modellar, som ST-GCNane, vart utkonkurrert til fordel for enklare modellar under dataavgrensande forhald, og at dei best ytande modellane vis signifikante resultat for forklaringer med fokus på klinisk relevante funksjonar som korresponderer med det rørte eller kontrarørte beinet. Dette arbeidet framheva óg røynda av nøye modellvalg når ein designar forklarbare KI system, ettersom at SHAP er svært modellavhengig. Det vart derimot funne at når ein brukar ensemble-metodar, vart den totale betydninga av funksjonane fokusert på klinisk relevante plan og ledd, noko som framhev den lovande framtida til forklarbare system. Nokon nye bidrag frå arbeidet er dei tidsmessig justerte SHAP-varmekarta, overlappa gangsyklusen, som mogeleggjer for klinikarar å spore modellslutningar til spesifikke biomekaniske hendingar. Sjølv om arbeidet bidrar til den aukande integrasjonen av KI-system i helsevesenet, står det att å validere den kliniske nytteverda gjennom direkte tilbakemeldingar frå klinikarar. 
Framtidig arbeid burde involvere bruker-sentrerte evalueringar, som integrerer domeneekspertar som gir ytterlegare innsikt i tolkbarheiten, relevansen og brukarvenlegheita til forklaringane. Ved å samkøyre denne forskinga med klinisk praksis legg den grunnlaget for bruk av forklarbar KI verktøy i klinisk ganganalyseapplikasjonar, ved å fokusere på openheit og forklarbarheit, samtidig som etiske praksisar vert overheldt.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2277550" class="vrtx-external-publication">
        <div id="vrtx-publication-2277550">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2277550">
                Lindblom, Diana Saplacan; Tørresen, Jim &amp; Hakimi, Nina
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Dynamic Dimensions of Safety -
How robot height and velocity affect human-robot interaction: An explorative study on the concept of perceived safety.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        University of Oslo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4845825">Full text in Research Archive</a>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/filter?fundingIdentifier=312333&amp;fundingSource=NFR">View all works in NVA</a></p>
    </div>

      </div>
    </div>



	  
            
      
        <div class="financing">
          <h2>Funding</h2>

          
            <div class="financing-info-wrapper">
            



          
            
            <div class="financing-info">
            <img lang="en" src="/vrtx/dist/resources/uio2/css/images/partner-logos/funded-research-council.svg" class="logo-img" alt="Funded by The Research Council of Norway" loading="lazy"/>
            
            
              <p class="financing-info-project-number"><span>Project number: </span><span>312333</span></p>
            
            </div>
          
          </div>
        
        
      
        </div>
      
            
      
        <div class="collaborators">
          <h2>Collaborators</h2>
          <div><p>Sunnaas Rehabilitation Hospital,&nbsp;Norway</p>
</div>
        </div>
      
            
      
            
      
        <a id="vrtx-change-language-link" href="/ritmo/prosjekter/pirc/index.html">
          Norwegian<span class="offscreen-screenreader">
            version of this page
          </span>
        </a>
      
            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Published</span> <span class="published-date">Jan. 24, 2025 12:30 PM </span>
        
          - <span class="last-modified-date">Last modified</span> <span class="last-modified-date">May 28, 2025 1:18 PM</span>
        
        </div>
      
          </div>
        </div>
      
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo/english">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Contact information</h2>
   <p><a href="/ritmo/english/about/">Contact us</a><br>
   <a href="/english/about/getting-around/areas/gaustad/ga09/">Find us</a></p>
</div>
<div>
   <h2>About the website</h2>
   <p><a href="/english/about/regulations/privacy-declarations/privacy-policy-web.html">Cookies</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Accessibility statement (in Norwegian only)</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Responsible for this page</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredaktør</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/english/projects/pirc/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Log in
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/english/" title="Go to uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

<!--a4d1bc0e1742c08b--><script style="display: none;">
(function(){
    var bp = document.createElement('script');
    var curProtocol = window.location.protocol.split(':')[0];
    if (curProtocol === 'https'){
   bp.src = 'https://zz.bdstatic.com/linksubmit/push.js';
  }
  else{
  bp.src = 'http://push.zhanzhang.baidu.com/push.js';
  }
    var s = document.getElementsByTagName("script")[0];
    s.parentNode.insertBefore(bp, s);
})();
</script><!--/a4d1bc0e1742c08b--></body>
</html>
