<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD with MathML3 v1.2 20190208//EN" "JATS-archivearticle1-mathml3.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="1.2"><front><journal-meta><journal-id journal-id-type="nlm-ta">elife</journal-id><journal-id journal-id-type="publisher-id">eLife</journal-id><journal-title-group><journal-title>eLife</journal-title></journal-title-group><issn publication-format="electronic" pub-type="epub">2050-084X</issn><publisher><publisher-name>eLife Sciences Publications, Ltd</publisher-name></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">77772</article-id><article-id pub-id-type="doi">10.7554/eLife.77772</article-id><article-categories><subj-group subj-group-type="display-channel"><subject>Tools and Resources</subject></subj-group><subj-group subj-group-type="heading"><subject>Neuroscience</subject></subj-group></article-categories><title-group><article-title>Deep learning-based feature extraction for prediction and interpretation of sharp-wave ripples in the rodent hippocampus</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes" id="author-272659"><name><surname>Navas-Olive</surname><given-names>Andrea</given-names></name><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">&#8224;</xref><xref ref-type="fn" rid="fn1">&#8225;</xref><xref ref-type="other" rid="fund2"/><xref ref-type="fn" rid="con1"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" equal-contrib="yes" id="author-272660"><name><surname>Amaducci</surname><given-names>Rodrigo</given-names></name><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">&#8224;</xref><xref ref-type="fn" rid="fn1">&#8225;</xref><xref ref-type="other" rid="fund3"/><xref ref-type="fn" rid="con2"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" 
id="author-152531"><name><surname>Jurado-Parras</surname><given-names>Maria-Teresa</given-names></name><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="con3"/><xref ref-type="fn" rid="conf2"/></contrib><contrib contrib-type="author" id="author-272661"><name><surname>Sebastian</surname><given-names>Enrique R</given-names></name><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="con4"/><xref ref-type="fn" rid="conf2"/></contrib><contrib contrib-type="author" corresp="yes" id="author-116305"><name><surname>de la Prida</surname><given-names>Liset M</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0002-0160-6472</contrib-id><email>lmprida@cajal.csic.es</email><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="con5"/><xref ref-type="fn" rid="conf3"/></contrib><aff id="aff1"><label>1</label><institution-wrap><institution-id institution-id-type="ror">https://ror.org/012gwbh42</institution-id><institution>Instituto Cajal, CSIC</institution></institution-wrap><addr-line><named-content content-type="city">Madrid</named-content></addr-line><country>Spain</country></aff><aff id="aff2"><label>2</label><institution-wrap><institution-id institution-id-type="ror">https://ror.org/01cby8j38</institution-id><institution>Grupo de Neurocomputaci&#243;n Biol&#243;gica (GNB), Universidad Aut&#243;noma de Madrid</institution></institution-wrap><addr-line><named-content content-type="city">Madrid</named-content></addr-line><country>Spain</country></aff></contrib-group><contrib-group content-type="section"><contrib contrib-type="editor"><name><surname>Peyrache</surname><given-names>Adrien</given-names></name><role>Reviewing Editor</role><aff><institution-wrap><institution-id institution-id-type="ror">https://ror.org/01pxwe438</institution-id><institution>McGill University</institution></institution-wrap><country>Canada</country></aff></contrib><contrib 
contrib-type="senior_editor"><name><surname>Huguenard</surname><given-names>John R</given-names></name><role>Senior Editor</role><aff><institution-wrap><institution-id institution-id-type="ror">https://ror.org/00f54p054</institution-id><institution>Stanford University School of Medicine</institution></institution-wrap><country>United States</country></aff></contrib></contrib-group><author-notes><fn fn-type="con" id="equal-contrib1"><label>&#8224;</label><p>These authors contributed equally to this work</p></fn><fn fn-type="other" id="fn1"><label>&#8225;</label><p>Co-shared first author</p></fn></author-notes><pub-date publication-format="electronic" date-type="publication"><day>05</day><month>09</month><year>2022</year></pub-date><pub-date pub-type="collection"><year>2022</year></pub-date><volume>11</volume><elocation-id>e77772</elocation-id><history><date date-type="received" iso-8601-date="2022-02-10"><day>10</day><month>02</month><year>2022</year></date><date date-type="accepted" iso-8601-date="2022-09-02"><day>02</day><month>09</month><year>2022</year></date></history><pub-history><event><event-desc>This manuscript was published as a preprint.</event-desc><date date-type="preprint" iso-8601-date="2022-03-16"><day>16</day><month>03</month><year>2022</year></date><self-uri content-type="preprint" xlink:href="https://doi.org/10.1101/2022.03.11.483905"/></event></pub-history><permissions><copyright-statement>&#169; 2022, Navas-Olive, Amaducci et al</copyright-statement><copyright-year>2022</copyright-year><copyright-holder>Navas-Olive, Amaducci et al</copyright-holder><ali:free_to_read/><license xlink:href="http://creativecommons.org/licenses/by/4.0/"><ali:license_ref>http://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This article is distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License</ext-link>, which permits unrestricted use and 
redistribution provided that the original author and source are credited.</license-p></license></permissions><self-uri content-type="pdf" xlink:href="elife-77772-v2.pdf"/><self-uri content-type="figures-pdf" xlink:href="elife-77772-figures-v2.pdf"/><abstract><p>Local field potential (LFP) deflections and oscillations define hippocampal sharp-wave ripples (SWRs), one of the most synchronous events of the brain. SWRs reflect firing and synaptic current sequences emerging from cognitively relevant neuronal ensembles. While spectral analyses have permitted advances, the surge of ultra-dense recordings now calls for new automatic detection strategies. Here, we show how one-dimensional convolutional networks operating over high-density LFP hippocampal recordings allowed for automatic identification of SWR from the rodent hippocampus. When applied without retraining to new datasets and ultra-dense hippocampus-wide recordings, we discovered physiologically relevant processes associated with the emergence of SWR, prompting for novel classification criteria. To gain interpretability, we developed a method to interrogate the operation of the artificial network. We found it relied on feature-based specialization, which permits identification of spatially segregated oscillations and deflections, as well as synchronous population firing typical of replay. Thus, using deep learning-based approaches may change the current heuristic for a better mechanistic interpretation of these relevant neurophysiological events.</p></abstract><abstract abstract-type="plain-language-summary"><title>eLife digest</title><p>Artificial intelligence is finding greater use in society through its ability to process data in new ways. One particularly useful approach known as convolutional neural networks is typically used for image analysis, such as face recognition. 
This type of artificial intelligence could help neuroscientists analyze data produced by new technologies that record brain activity with higher resolution.</p><p>Advanced processing could potentially identify events in the brain in real-time. For example, signals called sharp-wave ripples are produced by the hippocampus, a brain region involved in forming memories. Detecting and interacting with these events as they are happening would permit a better understanding of how memory works. However, these signals can vary in form, so it is necessary to detect several distinguishing features to recognize them.</p><p>To achieve this, Navas-Olive, Amaducci et al. trained convolutional neural networks using signals from electrodes placed in a region of the mouse hippocampus that had already been analyzed, and &#8216;telling&#8217; the neural networks whether they got their identifications right or wrong. Once the networks learned to identify sharp-wave ripples from this data, they could then apply this knowledge to analyze other recordings. These included datasets from another part of the mouse hippocampus, the rat brain, and ultra-dense probes that simultaneously assess different brain regions. The convolutional networks were able to recognize sharp-wave ripple events across these diverse circumstances by identifying unique characteristics in the shapes of the waves.</p><p>These results will benefit neuroscientists by providing new tools to explore brain signals. 
For instance, this could allow them to analyze the activity of the hippocampus in real-time and potentially discover new aspects of the processes behind forming memories.</p></abstract><kwd-group kwd-group-type="author-keywords"><kwd>ripples</kwd><kwd>convolutional neural networks</kwd><kwd>Neuropixels</kwd><kwd>hippocampus</kwd><kwd>dorsoventral</kwd></kwd-group><kwd-group kwd-group-type="research-organism"><title>Research organism</title><kwd>Mouse</kwd><kwd>Rat</kwd></kwd-group><funding-group><award-group id="fund1"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100010434</institution-id><institution>Fundacion La Caixa</institution></institution-wrap></funding-source><award-id>LCF/PR/HR21/52410030</award-id><principal-award-recipient><name><surname>de la Prida</surname><given-names>Liset M</given-names></name></principal-award-recipient></award-group><award-group id="fund2"><funding-source><institution-wrap><institution>Ministerio de Educacion</institution></institution-wrap></funding-source><award-id>FPU17/03268</award-id><principal-award-recipient><name><surname>Navas-Olive</surname><given-names>Andrea</given-names></name></principal-award-recipient></award-group><award-group id="fund3"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/501100004593</institution-id><institution>Universidad Aut&#243;noma de Madrid</institution></institution-wrap></funding-source><award-id>FPI-UAM-2017</award-id><principal-award-recipient><name><surname>Amaducci</surname><given-names>Rodrigo</given-names></name></principal-award-recipient></award-group><funding-statement>The funders had no role in study design, data collection and interpretation, or the decision to submit the work for publication.</funding-statement></funding-group><custom-meta-group><custom-meta specific-use="meta-only"><meta-name>Author impact statement</meta-name><meta-value>A new method is 
described to identify sharp-wave ripples from the rodent hippocampus with deep learning techniques, which may help to identify and characterize previously undetected physiological events.</meta-value></custom-meta></custom-meta-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Interpreting brain signals is essential to understanding cognition and behavior. Biologically relevant oscillations are considered reliable markers of brain operation (<xref ref-type="bibr" rid="bib8">Buzs&#225;ki et al., 2012</xref>; <xref ref-type="bibr" rid="bib23">Friston et al., 2015</xref>). Thus, analysis of either surface electroencephalography (EEG) or intracranial local field potential (LFP) is typically based on spectral methods relying on gold-standard definitions (<xref ref-type="bibr" rid="bib46">Niedermeyer and Silva, 2005</xref>). However, other features of EEG/LFP signals such as the slope, polarity, and latency to events are equally important (<xref ref-type="bibr" rid="bib42">Modi and Sahin, 2017</xref>). While interpreting neurophysiological signals is strongly influenced by these heuristics, methodological issues limit further advances.</p><p>During memory consolidation and retrieval, the hippocampal system releases short memory traces in the form of neuronal sequences (<xref ref-type="bibr" rid="bib30">Joo and Frank, 2018</xref>; <xref ref-type="bibr" rid="bib54">Pfeiffer, 2020</xref>; <xref ref-type="bibr" rid="bib53">Pfeiffer and Foster, 2015</xref>). Such activity comes often in tandem with spatially segregated oscillations (100&#8211;250 Hz) and LFP deflections dubbed sharp-wave ripples (SWRs) (<xref ref-type="bibr" rid="bib9">Buzs&#225;ki, 2015</xref>). They result from active recruitment of dedicated cell-type-specific microcircuits (<xref ref-type="bibr" rid="bib14">de la Prida, 2020</xref>; <xref ref-type="bibr" rid="bib65">Stark et al., 2014</xref>; <xref ref-type="bibr" rid="bib69">Valero et al., 2015</xref>). 
SWR-associated sequences can either replay previous experience or preplay internal representations (<xref ref-type="bibr" rid="bib18">Farooq and Dragoi, 2019</xref>; <xref ref-type="bibr" rid="bib21">Foster, 2017</xref>; <xref ref-type="bibr" rid="bib30">Joo and Frank, 2018</xref>), making their automatic detection crucial in understanding memory function. However, while spectral-based filters have permitted real-time SWR-related interventions (<xref ref-type="bibr" rid="bib20">Fern&#225;ndez-Ruiz et al., 2019</xref>; <xref ref-type="bibr" rid="bib24">Girardeau et al., 2009</xref>; <xref ref-type="bibr" rid="bib29">Jadhav et al., 2012</xref>), these methods are not optimal to disambiguate the underlying variability of a wealth of events, especially during online operation. Moreover, with the advent of ultra-dense recordings, the need for automatic identification is pressing. In spite of recent advances (<xref ref-type="bibr" rid="bib16">Dutta et al., 2019</xref>; <xref ref-type="bibr" rid="bib27">Hagen et al., 2021</xref>), current solutions still require improvement to capture the complexity of SWR events across hippocampal layers.</p><p>Here, we exploit the extraordinary capability of convolutional neural networks (CNNs) for real-time recognition to identify SWR (<xref ref-type="bibr" rid="bib3">Bai et al., 2018</xref>). Instead of adopting standard approaches used for temporal data such as in speech recognition, we chose to rely on unfiltered LFP profiles across hippocampal strata as individual data points making up an image. The one-dimensional object is equivalent to a clip of one-row pixels with as many colors as LFP channels. We show how one-dimensional CNN operating over high-density LFP hippocampal signals overcome spectral methods in detecting a large variety of SWR. Moreover, we develop a strategy to decode and explain CNN operation. 
In doing so, we discovered some features of SWR that permit their detection at distant layers when applied to Neuropixels recordings (<xref ref-type="bibr" rid="bib31">Jun et al., 2017</xref>). Using these tools allow for a more comprehensive interpretation of SWR signatures across the entire hippocampal system.</p></sec><sec id="s2" sec-type="results"><title>Results</title><sec id="s2-1"><title>Artificial neural network architecture and operation</title><p>Inspired by You-Only-Look-Once (YOLO) networks for real-time object recognition (<xref ref-type="bibr" rid="bib56">Redmon et al., 2015</xref>), we adapted a CNN architecture to search for SWR in the dorsal hippocampus of awake head-fixed mice. LFP signals acquired with high-density 8-channel silicon probes provide detailed information about the underlying CA1 microcircuit (<xref ref-type="fig" rid="fig1">Figure 1A</xref>; <xref ref-type="bibr" rid="bib41">Mizuseki et al., 2011</xref>; <xref ref-type="bibr" rid="bib44">Navas-Olive et al., 2020</xref>). The goal of the artificial network operating over 8-channel input signals (down-sampled at 1250 Hz) was to provide a single-output probability for the occurrence of an SWR event in a given temporal window (<xref ref-type="fig" rid="fig1">Figure 1A</xref>, bottom trace). Therefore, the input &#8216;object&#8217; is equivalent to a stream of pixels (&#215;1 number of data samples) with 8-channels instead of colors.</p><fig-group><fig id="fig1" position="float"><label>Figure 1.</label><caption><title>Convolutional neural network (CNN) definition and operation.</title><p>(<bold>A</bold>) Example of a sharp-wave ripple (SWR) event recorded with 8-channel silicon probes in the dorsal CA1 hippocampus of head-fixed awake mice. Vertical lines mark the analysis window (32 ms). The probability of SWR event from each window is shown at bottom. (<bold>B</bold>) Example of L1 kernel operation and calculation of the kernel activation (KA) signal. 
(<bold>C</bold>) Network architecture consists of seven blocks of one Convolutional layer+one BatchNorm layer+one Leaky ReLU layer each (layers 1&#8211;21). Dense layer 22 provides the CNN output as the SWR probability. (<bold>D</bold>) Examples of KA for layers 1&#8211;4 resulting from the SWR event shown in A. Note how the 8-channel local field potential (LFP) input is progressively transformed to capture different features of the event. (<bold>E</bold>) Example of the CNN output (i.e. KA of layer 22) at 32 ms resolution. A probability threshold can be used to identify SWR events. Note that some events can be predicted well in advance.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig1.jpg"/></fig><fig id="fig1s1" position="float" specific-use="child-fig"><label>Figure 1&#8212;figure supplement 1.</label><caption><title>Network definition and parameters.</title><p>(<bold>A</bold>) Preliminary evaluation of two different architectures, convolutional neural network (CNN), and long short-term memory (LSTM) networks, as well as different learning rates, number of kernels factor, and batch sizes. The resulting 10-best networks exhibited performance F1&gt;0.65 (green scale) at 32 ms resolution. Arrowheads indicate CNN32. Worst performance networks are shown in gray. (<bold>B</bold>) Evolution of the loss value during training of the 10-best networks shown in A. CNN32 exhibited the lowest and more stable learning curve (arrowhead). (<bold>C</bold>) Evolution of the loss function error across epochs for the training and test subsets, excluding overfitting issues. (<bold>D</bold>) Evaluation of the parameters of the Butterworth filter exhibiting performance F1&gt;0.65 (green values), similar to the CNN. The chosen parameters (100&#8211;300 Hz bandwidth and order 2) are indicated by arrowheads. We found no effect on the number of channels used for the filter (1, 4, and 8 channels), and chose that with the higher ripple power. 
(<bold>E</bold>) Extended hyper-parameter search for different optimization algorithms (Adam and AMSGrad), regularizing strategies, and the learning rate decay (781 parameter combinations). F1 values of the 30-best networks are shown (green values). Worst performance networks are in gray. Arrowheads indicate the chosen model. (<bold>F</bold>) Scheme of the experimental setup for online detection. CNN operated in real time at the interface between the Intan recording system and the controller of an opto-electrode probe. A sharp-wave ripple (SWR) event (right) illustrates detection over threshold. Detection was implemented using a plugin designed to incorporate TensorFlow into the OE (<xref ref-type="bibr" rid="bib61">Siegle et al., 2017</xref>) Graphic User Interface <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/CNNRippleDetectorOEPlugin">https://github.com/PridaLab/CNNRippleDetectorOEPlugin</ext-link>. (<bold>G</bold>) Example of an online closed-loop intervention (blue shadow) in a PV-cre mouse injected with AAV-DIO-ChR2 to optogenetically modulate SWR.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig1-figsupp1.jpg"/></fig></fig-group><p>Convolutional layers search for particular features in the input data by using kernels. The kernels of the first layer (L1) have dimensions of 8-channels &#215; length, with length reflecting the number of data samples. They advance along the temporal axis moving forward a similar number of non-overlapping samples defined by the stride (<xref ref-type="fig" rid="fig1">Figure 1B</xref>). The result of this operation is the kernel activation (KA) signal, which reflects the presence of some input features. L1 kernel length should be defined by considering the desired output resolution of the network. 
To ease subsequent online applications, we chose either 32 ms (CNN32, L1 kernel length 5) or 12.8 ms resolution (CNN12, L1 kernel length 2).</p><p>Our CNN operates by receiving the 8-channels input into each of the four kernels of L1 (<xref ref-type="fig" rid="fig1">Figure 1C</xref>). Kernels process the LFP and output a KA signal (<xref ref-type="fig" rid="fig1">Figure 1D</xref>). After passing through L1, the 8-channels are transformed into 4-channels, one per kernel (e.g. L1K1, L1K2, etc.). L1 output is then transformed by a BatchNorm layer (L2) and a Leaky ReLU layer (L3), before entering the next block (L4-L5-L6 and so on; <xref ref-type="fig" rid="fig1">Figure 1C</xref>). The size of subsequent kernels is defined by the input data from the Convolutional layers of the previous block (see Materials and methods). Inspired by YOLO, we staggered blocks with kernels of large and short length to allow for alternate convolution of the temporal and channel axes. As data are processed along these blocks, resolution decreases and hence the kernel length becomes progressively shorter.</p><p>We defined a suitable number of blocks that optimized the input (8 channels) and output features (1 channel output at 32 ms or 12.8 resolution), resulting in seven blocks for a total of 21 layers (<xref ref-type="fig" rid="fig1">Figure 1C</xref>). The final layer (L22) is a Dense layer with a sigmoidal activation function, so that the CNN output (between 0 and 1) can be interpreted as the SWR probability. An SWR event can be detected using an adjustable probability threshold (<xref ref-type="fig" rid="fig1">Figure 1E</xref>). Note that our CNN network operates along all streamed LFP data without any specification of the ongoing oscillatory state (i.e. 
theta or non-theta segments accompanying running and immobility periods, respectively).</p></sec><sec id="s2-2"><title>CNN training and performance offline and online</title><p>Having defined the main network architecture, we used a dataset manually tagged by an expert for training and initial validation (1794 events, two sessions from two mice; <xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>). An important decision we made was manually annotating the start and the end of SWR events so that the CNN could learn their onset.</p><p>Given the large number of parameter combinations, we run two optimization rounds using training and test chunks from the training dataset. We first tested a subset of hyper-parameters to look for the 10-best networks (<xref ref-type="fig" rid="fig1s1">Figure 1&#8212;figure supplement 1A</xref>, green shaded), and chose the one with the lowest and more stable learning curve (<xref ref-type="fig" rid="fig1s1">Figure 1&#8212;figure supplement 1B</xref>, arrowhead). Stabilization of the loss function error for the training and test subsets along epochs excluded potential overfitting (<xref ref-type="fig" rid="fig1s1">Figure 1&#8212;figure supplement 1C</xref>). In order to compare CNN performance against spectral methods, we implemented a Butterworth filter, which parameters were optimized using the same training set (<xref ref-type="fig" rid="fig1s1">Figure 1&#8212;figure supplement 1D</xref>). A subsequent hyper-parameter search (781 combinations) confirmed that the trained CNN was in the top-30 group (<xref ref-type="fig" rid="fig1s1">Figure 1&#8212;figure supplement 1E</xref>). A code notebook is available at <ext-link ext-link-type="uri" xlink:href="https://colab.research.google.com/github/PridaLab/cnn-ripple/blob/main/src/notebooks/cnn-example.ipynb">https://colab.research.google.com/github/PridaLab/cnn-ripple/blob/main/src/notebooks/cnn-example.ipynb</ext-link>. 
The trained model is accessible at the GitHub repository both for Python: <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/cnn-ripple">https://github.com/PridaLab/cnn-ripple</ext-link> (copy archived at <ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:b38a5db56c84c61821347b603dd884169d8f7b1c;origin=https://github.com/PridaLab/cnn-ripple;visit=swh:1:snp:5785d27d319d84076a2353540d821d26346be009;anchor=swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b">swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b</ext-link>; <xref ref-type="bibr" rid="bib1">Amaducci and Navas-Olive, 2021</xref>) and MATLAB: <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/cnn-matlab">https://github.com/PridaLab/cnn-matlab</ext-link>; (copy archived at <ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:0e369cb7c13f28d016e1b55a1f3e0242bc91ec91;origin=https://github.com/PridaLab/cnn-matlab;visit=swh:1:snp:0eef36d3eec9e00833377db989809596bda847ac;anchor=swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33">swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33</ext-link>; <xref ref-type="bibr" rid="bib45">Navas-Olive and Esparza, 2022</xref>).</p><p>We assessed the offline performance of the chosen CNN, as compared to the Butterworth filter as the gold standard, using additional tagged sessions never used for training (5695 events from n=15 sessions from five mice; <xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>). Performance was evaluated by calculating the precision (P, proportion of correct predictions over all predictions), recall (R, proportion of correct predictions over ground truth events, also known as sensitivity), and F1 values (harmonic mean of precision and recall). The P-R curve depicted better offline operation of both the CNN12 and CNN32 as compared with the filter (<xref ref-type="fig" rid="fig2">Figure 2A</xref>, left). 
To make the CNN and the filter thresholds comparable, we normalized their values by the best threshold performance (0.7 probability threshold for the CNN, 5SD for the filter). When we considered the relationship between performance and the detection threshold, we found that the CNN was more robust than the filter (<xref ref-type="fig" rid="fig2">Figure 2A</xref>, right). Filter thresholds had an effect in biasing detection of SWR, which exhibited different mean feature values (frequency and power) (<xref ref-type="fig" rid="fig2">Figure 2B</xref>, upper plots). In contrast, mean features of SWR detected by the CNN did not depend on the threshold and were consistent with the ground truth (<xref ref-type="fig" rid="fig2">Figure 2B</xref>, bottom).</p><fig id="fig2" position="float"><label>Figure 2.</label><caption><title>Convolutional neural network (CNN) performance.</title><p>(<bold>A</bold>) Offline P-R curve (mean is dark; sessions are light) (left), and F1 score as a function of normalized thresholds for the CNN at 32 and 12.8 ms resolution as compared with the Butterworth filter (right). Data reported as mean&#177;95% confidence interval for validation sessions (n=15 sessions; five mice). (<bold>B</bold>) Comparison of mean sharp-wave ripple (SWR) features (frequency, power, high-frequency band contribution, and spectral entropy) of events detected offline by the filter (upper plots) and the CNN32 (bottom) as a function of the threshold. The mean best threshold is indicated (5SD for the filter, 0.7 probability for the CNN). Note effect of the threshold in the mean frequency value (Kruskal-Wallis, Chi2(7)=30.5, p&lt;0.0001; post hoc tests *, p&lt;0.05; **, p&lt;0.001) and the power (Kruskal-Wallis, Chi2(7)=16.4, p=0.0218) for the filter but not for the CNN. Note also, differences against the mean value in the ground truth (GT). Mean data from n=15 sessions; five mice. 
(<bold>C</bold>) Online detection performance of CNN12 as compared with the Butterworth filter (n=8 sessions, t-test p=0.0047; n=5 mice, t-test p=0.033). (<bold>D</bold>) Mean and per session P-R curve (left), and F1 score as a function of the optimized threshold for online sessions, as analyzed post hoc (right). Data from n=8 sessions from five mice.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig2.jpg"/></fig><p>The offline analysis presented above was possible because the ground truth was already known. In real case scenarios, the experimenter has to rely on relatively arbitrary threshold settings. To evaluate this further, we performed a new set of experiments for real-time detection in the Open Ephys (OE) environment (<xref ref-type="bibr" rid="bib61">Siegle et al., 2017</xref>) (eight sessions from five mice). To this purpose, we developed a plugin designed to incorporate TensorFlow, an open-source library for machine learning applications, into the OE graphic user interface (<xref ref-type="fig" rid="fig1s1">Figure 1&#8212;figure supplement 1F, G</xref>; <xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>). To be consistent with detection standards (<xref ref-type="bibr" rid="bib20">Fern&#225;ndez-Ruiz et al., 2019</xref>), the online filter was applied to the channel with maximal ripple power and an additional non-ripple channel was used to veto detection of common artifacts. We found better online performance of the CNN at 12.8 ms resolution as compared with the filter (<xref ref-type="fig" rid="fig2">Figure 2C</xref>; per session p=0.0047; per mice p=0.033). When it came to the ability to anticipate SWR events online, the CNN slightly overtook the Butterworth filter (time-to-SWR-peak for CNN12: &#8211;7.01&#177;2.05 ms; Butterworth filter: &#8211;4.66&#177;2.87 ms; paired t-test, p=0.048). 
A post hoc offline evaluation of online sessions confirmed better performance of the CNN versus the filter, for all normalized thresholds (<xref ref-type="fig" rid="fig2">Figure 2D</xref>).</p></sec><sec id="s2-3"><title>Detection limits of SWR and their influences on CNN operation</title><p>Are there any practical detection limits for SWR? How good is CNN performance and how much is it determined by the expert heuristics?</p><p>First, we sought to compare CNN and the filter at its maximal capability using data from all validation sessions (offline and online: 22 sessions from 10 mice). To this purpose, we equated the methods using the best possible detection threshold per session (the one that optimized F1) and found roughly similar values (<xref ref-type="fig" rid="fig3">Figure 3A</xref>; CNN12: F1=0.68 &#177; 0.06; CNN32: F1=0.63 &#177; 0.05; Butterworth filter: F1=0.65 &#177; 0.11), indicating the CNN meets the gold standard provided the filter is optimized. Note that this can only be possible because we know the ground truth. Remarkably, the filter exhibited larger variability across sessions. Our CNN performed similar to a filter-based optimized algorithm (F1=0.65 &#177; 0.11) (<xref ref-type="bibr" rid="bib16">Dutta et al., 2019</xref>), but significantly better than RippleNET, a recurrent network designed to detect SWR mostly during periods of immobility (F1=0.31 &#177; 0.22; p&lt;0.00001 one-way ANOVA for comparisons with both CNN12 and CNN32) (<xref ref-type="bibr" rid="bib27">Hagen et al., 2021</xref>). This supports similar operation of CNN as compared with the gold standard in conditions when optimized detection is possible (i.e. 
when the ground truth is known).</p><fig id="fig3" position="float"><label>Figure 3.</label><caption><title>Effects of different experts&#8217; ground truth on convolutional neural network (CNN) performance.</title><p>(<bold>A</bold>) Comparison between the CNN and Butterworth filter using thresholds that optimized F1 per session (22 recording sessions from 10 mice). Note that this optimization process can only be implemented when the ground truth (GT) is known. (<bold>B</bold>) A subset of data annotated independently by two experts was used to evaluate the ability of each method to identify events beyond the individual ground truth. The original expert provided data for training and validation of the CNN. The new expert tagged events independently in a subset of sessions (14 sessions from seven mice). The performance of CNN, but not that of the filter, was significantly better when confronted with the consolidated ground truth (one-way ANOVA for the type of ground truth for CNN32 F(2)=0.01, p=0.0128 and CNN12 F(2)=0.01, p=0.0257). Significant effect of methods when applied to the consolidated ground truth (one-way ANOVA F(2)=0.02, p=0.0331; rightmost); post hoc tests **, p&lt;0.01; ***, p&lt;0.005. CNN models and the filter were applied at mean best performance threshold. (<bold>C</bold>) Performance obtained from the experts&#8217; ground truth when acting as a mutual classifier (n=14 sessions). Note that this provides an estimation of the maximal performance level. (<bold>D</bold>) We used the hc-11 dataset (<xref ref-type="bibr" rid="bib26">Grosmark and Buzs&#225;ki, 2016</xref>) at the CRCNS public repository (<ext-link ext-link-type="uri" xlink:href="https://crcns.org/data-sets/hc/hc-11/about-hc-11">https://crcns.org/data-sets/hc/hc-11/about-hc-11</ext-link>) to further evaluate the effect of the definition of the ground truth and to test for the CNN generalization capability. 
The data consisted of 10-channel high-density recordings from the CA1 region of freely moving rats. We randomly selected 8-channels to cope with the input dimension of our CNN, which was not retrained. The dataset comes with annotated sharp-wave ripple (SWR) events (dark shadow) defined by stringent criteria (coincidence of both population synchrony and SWR). CNN False Positives defined by this partially annotated ground truth were re-reviewed and validated (light shadow). (<bold>E</bold>) Performance of the original CNN, without retraining, at both temporal resolutions over the originally annotated (dark colors) and after False Positives validation (light colors). Performance of the Butterworth filter is also shown. Paired t-test at *, p&lt;0.05; **, p&lt;0.01; ***, p&lt;0.001. Data from five sessions, two rats. See <xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig3.jpg"/></fig><p>The use of supervised learning for training and posterior validation requires using datasets annotated by experts. However, the expert&#8217;s opinion may be influenced by the recording method, the experimental goal, and the existing knowledge. To evaluate the impact of these potential biases, we used the ground truth from a second expert in the lab for validation purposes only (3403 events, n=14 sessions, seven mice). While results were overall comparable, there were some natural differences between experts on a session-by-session basis (<xref ref-type="fig" rid="fig3">Figure 3B</xref>). Interestingly, when we confronted the network detection with the consolidated ground truth, we noted that the CNN could be actually detecting many more SWR events than initially accounted by each individual expert (one-way ANOVA for ground truth, CNN12: F(2)=0.01, p=0.026; CNN32: F(2)=0.01, p=0.013). 
In contrast, the filter failed to exhibit such an improvement, and performed worse when tested against the consolidated ground truth (one-way ANOVA for models, F(2)=0.02, p=0.033) (<xref ref-type="fig" rid="fig3">Figure 3B</xref>, rightmost). Notably, an expert acting as a classifier of the other expert&#8217;s ground truth scored at 0.70&#177;0.13 (<xref ref-type="fig" rid="fig3">Figure 3C</xref>), providing a mean reference of best performance (<xref ref-type="fig" rid="fig3">Figure 3A and B</xref>).</p><p>To evaluate this point further, and to test for the capability of the CNN to generalize beyond training with head-fixed mice data, we used an externally annotated dataset of SWR recorded with high-density silicon probes from freely moving rats (<xref ref-type="bibr" rid="bib26">Grosmark and Buzs&#225;ki, 2016</xref>; <xref ref-type="fig" rid="fig3">Figure 3D</xref>; 2041 events; five sessions from two rats; <xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>). In that work, SWR detection was conditioned on the coincidence of both population synchrony and LFP definition, thus providing a &#8216;partial ground truth&#8217; (i.e. SWR without population firing were not annotated in the dataset). Consistently, the network recalled most of the annotated events (R=0.80 &#177; 0.18), but precision was apparently low (P=0.42 &#177; 0.18) (<xref ref-type="fig" rid="fig3">Figure 3E</xref>). Hence, we evaluated all False Positive predictions and found that many of them were actually unannotated SWR (2403 events), meaning that precision was actually higher (P=0.77 &#177; 0.08 for CNN32, P=0.86 &#177; 0.08 for CNN12, both at P&lt;0.01 for paired t-test; <xref ref-type="fig" rid="fig3">Figure 3E</xref>). 
As above, the filter failed to improve F1 performance (<xref ref-type="fig" rid="fig3">Figure 3E</xref>), and remained lower than for the CNN12.</p><p>Altogether, our analyses indicate that detection limits of SWR may be determined by the expert&#8217;s criteria. CNN performance improves when confronted with the consolidated ground truth, supporting that shared community tagging may help to advance our understanding of SWR definition. Importantly, a CNN trained on data from head-fixed mice was able to generalize to freely moving rats.</p></sec><sec id="s2-4"><title>Unveiling SWR latent features</title><p>Interpretability is a major issue in modern machine learning (<xref ref-type="bibr" rid="bib39">Mahendran and Vedaldi, 2014</xref>; <xref ref-type="bibr" rid="bib57">Richards et al., 2019</xref>). To better understand and validate CNN operation, we looked for methods to visualize the kernel features that had better explained the network ability to recognize SWR events. We exploited a standard procedure from CNN image recognition (<xref ref-type="bibr" rid="bib62">Simonyan et al., 2013</xref>) consisting of maximizing the KA using gradient ascent in the input space (<xref ref-type="fig" rid="fig4">Figure 4A</xref>, top). To this purpose, a noisy LFP input is progressively updated until the KA is maximal, using different initialization values (<xref ref-type="fig" rid="fig4">Figure 4A</xref>, bottom). The resulting signal is equivalent to a saliency map reflecting the latent preferred features by each CNN kernel. This approach is similar to inferring visual receptive fields using noise stimulation.</p><fig id="fig4" position="float"><label>Figure 4.</label><caption><title>Analysis of the convolutional neural network (CNN) kernel saliency maps.</title><p>(<bold>A</bold>) Schematic illustration of the method to calculate the kernel saliency maps using gradient ascent. Note that different initializations converge to the same solution. 
(<bold>B</bold>) Examples of saliency maps from some representative kernels. Note ripple-like preferred features of L1 kernels and temporally specific features of L19 and L22 kernels. (<bold>C</bold>) Pattern-matching between saliency maps shown in B and local field potential (LFP) inputs of the example SWR event (120 ms window). (<bold>D</bold>) Same as in C for a True Negative example event. (<bold>E</bold>) Mean template-matching signal (top) and maximal values (bottom) from all detected events classified by CNN32 as True Positive (4385 events), False Positives (2468 events), False Negatives (3055 events), and True Negatives (4902 events). One-way ANOVA, F(3)=1517, p&lt;0.0001; ***, p&lt;0.001 after correction by Bonferroni. (<bold>F</bold>) Distribution of False Positive events per categories both in the CNN32 and the filter.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig4.jpg"/></fig><p>Just as two-dimensional CNN layers specialize in detecting edges, shapes, and textures of an image, we found the kernels focused on distinct LFP features. Consistent with the data above, kernels from the first layers specialized in detecting rhythmic and periodic patterns (e.g. L1K1 and L1K2), while later layers seem to focus on identifying these patterns along time (e.g. L19K18; <xref ref-type="fig" rid="fig4">Figure 4B</xref>). By computing the pattern-matching function between saliency maps and the 8-channels LFP, we evaluated how the kernels accounted for different features of True Positive events, that is, SWR (<xref ref-type="fig" rid="fig4">Figure 4C</xref>). For example, L1K1 was maximally activated at the peak of ripple oscillations, while L1K2 and L19K18 were maximal at the onset, supporting the network ability to anticipate SWR. Pattern-matching between true SWR events and the saliency map of the output layer L22 provided an idea of what the CNN recognized as an ideal &#8216;object&#8217;. 
In contrast, pattern-matching values in the absence of SWR events (True Negative events) were typically lower as compared with those obtained from the ground truth (<xref ref-type="fig" rid="fig4">Figure 4D</xref>).</p><p>To quantify these observations, we evaluated how much the output of L22K1 saliency maps matched different input events, using data from the training and offline validation sessions (17 sessions, seven mice). Consistent with the examples, pattern-matching was maximal for True Positive and minimal for True Negative events (one-way ANOVA, F(3)=1517, p&lt;0.0001). Pattern-matching values were higher for False Positives than for False Negatives (<xref ref-type="fig" rid="fig4">Figure 4E</xref>), meaning that the network may be identifying some latent features. A closer examination of False Positive predictions suggested that about 20% of them could be reclassified. From these, about one-third were sharp waves without clear associated ripples (SW no ripples), while others were actually ripple events without associated sharp waves (ripples no SW), population firing, and artifacts (<xref ref-type="fig" rid="fig4">Figure 4F</xref>). Instead, examination of False Positive by the filter showed a major trend to detect artifacts at the expense of more physiologically relevant events (<xref ref-type="fig" rid="fig4">Figure 4F</xref>). Examples of True Positive and False Positive detected by the CNN can be seen in <xref ref-type="fig" rid="fig5">Figure 5</xref>.</p><fig id="fig5" position="float"><label>Figure 5.</label><caption><title>Examples of True Positive and False Positive detections by the convolutional neural network (CNN).</title><p>Note that some False Positive events are sharp waves without ripples (SW no ripple) and sharp wave with population firing. The CNN also detected ripples with no clear associated sharp wave (ripple no SW). 
While all these False Positive types of events are not included in the ground truth, they resemble physiologically relevant categories. This figure is built with an executable code: <ext-link ext-link-type="uri" xlink:href="https://colab.research.google.com/github/PridaLab/cnn-ripple-executable-figure/blob/main/cnn-ripple-false-positive-examples.ipynb">https://colab.research.google.com/github/PridaLab/cnn-ripple-executable-figure/blob/main/cnn-ripple-false-positive-examples.ipynb</ext-link>.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig5.jpg"/></fig><p>This analysis confirms that the CNN has the ability to identify SWR events by relying on feature-based kernel operation. Moreover, some ambiguous predictions according to the current definition of SWR may identify different forms of population firing and oscillatory activities associated with sharp waves, supporting the network ability to generalize beyond the particular expert&#8217;s ground truth.</p></sec><sec id="s2-5"><title>Interpreting and explaining CNN operation</title><p>As shown above, the CNN ability relies on feature extraction by the different kernels. To gain explanatory power on how this applies to SWR detection, we sought to visualize and quantify the CNN kernel operation.</p><p>First, we examined the weights of the first layer kernels, which act directly over high-density LFP inputs. We noted that their profiles were especially suited for assessing critical LFP features, such as the laminar organization of activity. For example, L1K1 acted along the spatial scale by differentially weighting LFP channels along the somatodendritic axis and deep-superficial layers (<xref ref-type="fig" rid="fig6">Figure 6A</xref>), consistent with the saliency map shown above. In contrast, weights from L1K2 likely operated in the temporal scale with major differences along the kernel length (<xref ref-type="fig" rid="fig6">Figure 6A</xref>). 
In this case, by positively weighting upper channels at later samples this filter may be anticipating some SWR motifs, as shown before. Interestingly, opposing trends between top and bottom channels suggest some spatial effect as well. L1K3 and L1K4 provided less obvious integration across the spatial and temporal scales. In spite of the complexity of the resulting convolution along the entire event, visualization of KA reflects detection of ripples as well as the slow and fast deflections of the associated sharp wave (see L1 outputs in <xref ref-type="fig" rid="fig1">Figure 1D</xref> for CNN32; <xref ref-type="fig" rid="fig6s1">Figure 6&#8212;figure supplement 1A,B</xref> for CNN12).</p><fig-group><fig id="fig6" position="float"><label>Figure 6.</label><caption><title>Feature map analysis of CNN32 operation.</title><p>(<bold>A</bold>) Examples of kernel weights from different layers of CNN32. Note different distribution of positive and negative weights. In layer 1, the four different kernels act to transform the 8-channels input into a single channel output by differently weighting contribution across the spatial (upper and lower local field potential [LFP] channels; vertical arrows in L1K1 and L1K2) and temporal scales (horizontal arrow in L1K2). See the resulting kernel activation for the example sharp-wave ripple (SWR) event in <xref ref-type="fig" rid="fig1">Figure 1D</xref>. (<bold>B</bold>) Feature map from the example SWR event (100 ms window; gray) built by concatenating the kernel activation signals from all layers into a single vector. The feature map of a randomly selected LFP epoch without annotated SWR is shown at bottom (black). (<bold>C</bold>) Two-dimensional reduced visualization of CNN32 feature maps using Uniform Manifold Approximation and Projection (UMAP) shows clear segregation between similar number of SWRs (ground truth [GT]) and randomly chosen LFP epochs (Rand) (7491 events, sampled from 17 sessions, seven mice). 
Note distribution of SWR probability at right consistent with the ground truth. (<bold>D</bold>) Distribution of True Positive, True Negative, False Positive, and False Negative events in the UMAP cloud. (<bold>E</bold>) Distribution of the False Positive events previously validated in <xref ref-type="fig" rid="fig4">Figure 4F</xref>. Note that they all lay over the ground truth region.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig6.jpg"/></fig><fig id="fig6s1" position="float" specific-use="child-fig"><label>Figure 6&#8212;figure supplement 1.</label><caption><title>Feature map analysis of CNN12 operation.</title><p>(<bold>A</bold>) Examples of CNN12 kernel activations for layers 1&#8211;4 resulting from the example sharp-wave ripple (SWR) event. Note how the 8-channel local field potential (LFP) input is progressively transformed to capture different features of the event at a higher resolution as compared with CNN32 (see <xref ref-type="fig" rid="fig1">Figure 1D</xref>). (<bold>B</bold>) Examples of CNN12 kernel weights. As for CNN32 (<xref ref-type="fig" rid="fig6">Figure 6A</xref>), note different distribution of positive and negative weights across the spatial and temporal scales. (<bold>C</bold>) Uniform Manifold Approximation and Projection (UMAP) plot of the CNN12 feature maps shows clear segregation between similar number of SWRs (ground truth [GT]) and randomly chosen LFP epochs (Rand) (7491 events from 17 sessions, seven mice). The distribution of SWR probability is shown next, as well as the distribution of all detected events by categories. (<bold>D</bold>) Performance (F1) evaluated for each CNN layer at both temporal resolutions (data from 17 sessions, seven mice).</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig6-figsupp1.jpg"/></fig></fig-group><p>The same reasoning applies to the next layers. 
However, since CNN acts to transform an LFP &#8216;object&#8217; into a probability value, the spatial and temporal features of SWR events become increasingly abstract. Notwithstanding, their main features are still recognized. For example, L4K1 and L4K2 outputs likely reflected the spatiotemporal organization of the input SWR event, in particular the slower components and uneven distribution of ripples (see <xref ref-type="fig" rid="fig1">Figure 1D</xref> and <xref ref-type="fig" rid="fig6s1">Figure 6&#8212;figure supplement 1A</xref>).</p><p>To quantify these observations, we evaluated how the different kernels were activated by a similar number of LFP events centered at either the ground truth or at a random timing (<xref ref-type="fig" rid="fig6">Figure 6B</xref>, 7491 events in each category; data from both the training and test offline sessions). For each window, we concatenated the resulting KA from all layers in a single vector, dubbed feature map (<xref ref-type="fig" rid="fig6">Figure 6B</xref>; length 1329 for CNN32, 3991 for CNN12). Since each layer generates a characteristic activity in response to input data, we reasoned that feature maps should carry information on the network representation of a particular LFP event.</p><p>We used Uniform Manifold Approximation and Projection (UMAP), a computationally efficient dimensionality reduction and visualization tool, to explore feature maps. 
UMAP successfully segregated feature maps of LFP events according to their detection probability in a two-dimensional cloud (<xref ref-type="fig" rid="fig6">Figure 6C</xref>; <xref ref-type="fig" rid="fig6s1">Figure 6&#8212;figure supplement 1C</xref>), supporting that the entire CNN is coding for different features of SWR across layers (<xref ref-type="fig" rid="fig6s1">Figure 6&#8212;figure supplement 1D</xref>).</p><p>We labeled each LFP event in UMAP coordinates as True Positive (detected ground truth events), False Positive (random events detected as SWR), False Negative (undetected ground truth), and True Negative (unannotated and undetected events). We found striking segregation across the UMAP cloud with True Positive and True Negative events falling apart (<xref ref-type="fig" rid="fig6">Figure 6D</xref>; <xref ref-type="fig" rid="fig6s1">Figure 6&#8212;figure supplement 1C</xref>). False Negatives were mostly located at the intermediate region, suggesting they could be detected with less conservative thresholds. Interestingly, False Positive predictions were scattered all around the cloud, supporting the idea that they reflect heterogeneous events as seen above. Mapping all the previously validated False Positive events (see <xref ref-type="fig" rid="fig4">Figure 4F</xref>) over the UMAP cloud confirmed that those corresponding to population firing synchrony and sharp waves without ripples distributed over the ground truth, while those corresponding to artifacts mostly fell apart (<xref ref-type="fig" rid="fig6">Figure 6E</xref>).</p><p>Altogether, these analyses permitted us to understand how the CNN operates to detect SWR events. Our study suggests that a CNN relying on feature-based detection allows to capture a large diversity of SWR events. 
The new method, in combination with community tagging efforts and optimized filters, could potentially facilitate discovery and interpretation of the complex neurophysiological processes underlying SWR.</p></sec><sec id="s2-6"><title>Leveraging CNN capabilities to interpret SWR dynamics</title><p>Equipped with this tool we sought to understand the dynamics of SWR across the entire hippocampus. To this purpose, we obtained Neuropixels recordings from different rostro-caudal penetrations in head-fixed mice (<xref ref-type="fig" rid="fig7">Figure 7A</xref>; n=4 sessions, four mice; <xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>). Detailed post hoc histological analysis validated the probe tracks passing through a diversity of brain regions, including several thalamic nuclei as well as the dorsal and ventral hippocampus (<xref ref-type="fig" rid="fig7">Figure 7B</xref>, <xref ref-type="fig" rid="fig7s1">Figure 7&#8212;figure supplement 1A</xref>).</p><fig-group><fig id="fig7" position="float"><label>Figure 7.</label><caption><title>Hippocampus-wide sharp-wave ripple (SWR) dynamics through the lenses of convolutional neural network (CNN).</title><p>(<bold>A</bold>) Neuropixels probes were used to obtain ultra-dense local field potential (LFP) recordings across the entire hippocampus. Offline detection was applied over continuous simulated penetrations (8-channels). Detection performance is evaluated across brain regions and hippocampal layers using the CNN trained with a different electrode type. See Methods for the list of acronyms. (<bold>B</bold>) Histological validation of one of the experiments shown in A (red arrowhead). Scale bar corresponds to 350 &#181;m. (<bold>C</bold>) Performance of CNN32 across hippocampal layers (96 dorsal simulated penetrations, four mice). The results of an independent one-way ANOVA for P, R, and F1 is shown separately. ***, p&lt;0.001. 
(<bold>D</bold>) Dorsoventral differences of CNN32 performance across layers. P, R, and F1 values from dorsal and ventral detection were compared pairwise (55 dorsal and 55 ventral simulated penetrations, four mice). *, p&lt;0.05; **, p&lt;0.01; ***, p&lt;0.001. (<bold>E</bold>) Example of an SWR detected across several layers (black arrowhead). Note ripple oscillations all along the SR and SLM. A SWR event which was only detected at SP dorsal and ventral is shown at right (open arrowhead). (<bold>F</bold>) Mean LFP and current-source density (CSD) signals from the events detected at different layers of the dorsal hippocampus of mouse Npx-Thy160620 (top). Bottom plots show the SWR-triggered average responses of pyramidal cells and interneurons. Cells are sorted by their timing during SWR events detected at SP. (<bold>G</bold>) Quantification of the magnitude of the SR sink and SLM source for events detected at SO, SR, and SLM, as compared against SP detection. One-way ANOVA SR CSD: F(2)=9.13, p=0.0004; SLM CSD: F(2)=9.64, p=0.0003; **, p&lt;0.01; ***, p&lt;0.001. (<bold>H</bold>) Quantification of changes of firing rate and timing of pyramidal cells during SWR detected at different layers. Firing rate: F(3) = 28.68, p&lt;0.0001; *, p&lt;0.05; ***, p&lt;0.001. Timing: F(2) = 10.18, p&lt;0.0001; ***, p&lt;0.0001.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig7.jpg"/></fig><fig id="fig7s1" position="float" specific-use="child-fig"><label>Figure 7&#8212;figure supplement 1.</label><caption><title>Convolutional neural network (CNN) detection of sharp-wave ripple (SWR) from ultra-dense Neuropixels recordings.</title><p>(<bold>A</bold>) Detailed post hoc histological analysis with SHARP-Track (<xref ref-type="bibr" rid="bib60">Shamash et al., 2018</xref>) allowed identifying a diversity of brain regions pierced by the Neuropixels probe. The different brain regions were annotated and confronted with the Paxinos atlas. 
(<bold>B</bold>) Distribution of True Positive events detected by CNN32 across layers and regions of the four different mice. (<bold>C</bold>) Mean local field potential (LFP) of True Positive events detected by CNN32 at SO, SR, and SLM of the examples shown in <xref ref-type="fig" rid="fig7">Figure 7F</xref>. Note that sharp waves and ripples are differentially visible at these layers. (<bold>D</bold>) Detection performance of the Butterworth filter across hippocampal layers for mouse Npx-Cal280720. See the same session analyzed by CNN32 at the rightmost plot of <xref ref-type="fig" rid="fig7">Figure 7A</xref>. (<bold>E</bold>) Detection performance of RippleNET across hippocampal layers for mouse Npx-Cal280720. Same session as in D and at the rightmost plot of <xref ref-type="fig" rid="fig7">Figure 7A</xref>. (<bold>F</bold>) Quantification of changes of the firing rate and timing of putative GABAergic interneurons during SWR detected at different layers. Firing rate: F(3) = 3.89, p=0.011; *, p&lt;0.05. Timing: not significant.</p></caption><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/fig7-figsupp1.jpg"/></fig></fig-group><p>By exploiting the ultra-dense configuration of Neuropixels, we simulated consecutive penetrations covering the entire dorsoventral axis (<xref ref-type="fig" rid="fig7">Figure 7A</xref>). We ran offline detection using eight neighboring Neuropixels channels as the inputs, then moved four channels downward/upward and repeated detection again, up to the end of the probe. We used the original CNN32 without retraining, the Butterworth filter and RippleNET, to evaluate detection performance against the ground truth.</p><p>Consistent with data above, we found successful detection of SWR events by the CNN from the dorsal CA1 region (<xref ref-type="fig" rid="fig7">Figure 7A</xref>). 
While detection was optimal at the CA1 cell layer (stratum pyramidale [SP]), we noted many events were actually identified from SWR-associated LFP signatures at the radiatum (SR) and lacunosum moleculare (SLM) layers (<xref ref-type="fig" rid="fig7">Figure 7C</xref>; <xref ref-type="fig" rid="fig7s1">Figure 7&#8212;figure supplement 1B</xref>). When evaluated per layer, detection of SWR was better at the dorsal than at the ventral hippocampus, except for SR and SLM (<xref ref-type="fig" rid="fig7">Figure 7D</xref>, left). We found no major differences except for precision, when all layers were pooled together (<xref ref-type="fig" rid="fig7">Figure 7D</xref>, right). No difference in the rate of False Positives between SO (0.52&#177;0.21), SR (0.50&#177;0.21), and SLM (0.46&#177;0.19) can account for this effect.</p><p>In spite of the fact that only a subset of SWR could be identified from recordings at SR and SLM (i.e. R-values were low), precision was very high (i.e. over 80% of predictions were consistent with the ground truth). A close examination of the morphology of these events confirmed they exhibited LFP and oscillatory features consistent with the kernel saliency maps (<xref ref-type="fig" rid="fig7">Figure 7E</xref>, <xref ref-type="fig" rid="fig7s1">Figure 7&#8212;figure supplement 1C</xref>). Remarkably, both the Butterworth filter and RippleNET failed to identify SWR-associated signatures beyond the dorsal SP (<xref ref-type="fig" rid="fig7s1">Figure 7&#8212;figure supplement 1D,E</xref>).</p><p>To gain insights into the underlying physiology and to discard potential volume conduction effects, we simulated linear penetrations through the dorsal hippocampus and estimated the associated current-source density (CSD) signals of events detected at different layers (<xref ref-type="fig" rid="fig7">Figure 7F</xref>, top). 
We found larger sinks and sources for SWR that can be detected at SLM and SR versus those detected at SO (<xref ref-type="fig" rid="fig7">Figure 7G</xref>; z-scored by mean values of SWR detected at SP only). We also exploited Neuropixels to isolate activity from putative pyramidal cells (n=99) and interneurons (n=29, all penetrations) during the different SWR event types (<xref ref-type="fig" rid="fig7">Figure 7F</xref>, bottom). For pyramidal cells, we found striking reorganization of the firing rate and timing during SWR detected at SO, SR, and SLM (<xref ref-type="fig" rid="fig7">Figure 7H</xref>). Interneurons exhibited similar variability (<xref ref-type="fig" rid="fig7s1">Figure 7&#8212;figure supplement 1E</xref>). Timing and rate differences of pyramidal cell and interneuronal firing with respect to SWR events detected at different layers support the idea that they reflect activation of different hippocampal ensembles. Our CNNs thus provide unique opportunities to study the so far elusive dynamics accompanying SWR responses.</p></sec></sec><sec id="s3" sec-type="discussion"><title>Discussion</title><p>Here, we report how one-dimensional convolutional networks operating over high-density LFP recordings allow for improved detection and interpretation of hippocampal SWR events. While the network was trained on a subset of LFP data recorded around the dorsal CA1 cell layer of head-fixed mice, detection generalized across strata, brain locations (e.g. ventral hippocampus), preparations (i.e. freely moving), and species (i.e. rats) without the need for retraining. Our CNN exhibited a much higher stability, less threshold-dependent sensitivity, and overall higher performance as compared with the spectral filter and RippleNET, a recurrent neural network solution. This unique capability of our CNN relies on feature-based analysis of LFP signals, which provide similar explanatory power as standard LFP profiling. 
Such a developmental potential of convolutional neural networks permits challenging the interpretation of brain signals (<xref ref-type="bibr" rid="bib22">Frey et al., 2021</xref>), and SWR in particular (this study).</p><p>From a physiological perspective, studying brain function relies on understanding activity in relation to behavior and cognition (<xref ref-type="bibr" rid="bib11">Cohen, 2017</xref>; <xref ref-type="bibr" rid="bib23">Friston et al., 2015</xref>). Inspired by the tradition to observe and categorize, neuroscientists require classifying EEG/LFP signals into patterns, which presumably should gain mechanistic significance at the neuronal and microcircuit levels (<xref ref-type="bibr" rid="bib7">Buzs&#225;ki and Draguhn, 2004</xref>; <xref ref-type="bibr" rid="bib19">Fern&#225;ndez et al., 1999</xref>; <xref ref-type="bibr" rid="bib46">Niedermeyer and Silva, 2005</xref>). Yet, some of the most widely used classification schemes still generate debate. For instance, contributors to gamma oscillations (40&#8211;100 Hz) include fluctuating synaptic potentials reflecting inhibition, excitation, or both in interaction with phase-locking firing from subsets of cells (<xref ref-type="bibr" rid="bib2">Atallah and Scanziani, 2009</xref>; <xref ref-type="bibr" rid="bib4">Bartos et al., 2007</xref>). The specific contribution of the different factors at the resulting dominant oscillatory frequency band is not trivial (<xref ref-type="bibr" rid="bib10">Buzs&#225;ki and Schomburg, 2015</xref>). In addition, relying on spectral definitions to analyze EEG/LFP data has to cope with the nonstationary nature of brain activity, while the demarcation of frequency bands does not necessarily fit to unambiguous basic mechanisms. 
Whether this reflects the elusive emergent behavior of brain activity or methodological limitations is arguable.</p><p>We aimed to exploit machine-learning tools to transform the study of hippocampal SWR, a major neurophysiological event underlying memory trace consolidation and recall (<xref ref-type="bibr" rid="bib9">Buzs&#225;ki, 2015</xref>). While SWR presumably entail coordinated activity of pyramidal cells and GABAergic interneurons in a mnemonically relevant sequence-specific manner (<xref ref-type="bibr" rid="bib15">Diba and Buzs&#225;ki, 2007</xref>; <xref ref-type="bibr" rid="bib25">Gridchyn et al., 2020</xref>; <xref ref-type="bibr" rid="bib47">Olafsd&#243;ttir et al., 2018</xref>; <xref ref-type="bibr" rid="bib66">Stark et al., 2015</xref>; <xref ref-type="bibr" rid="bib70">van de Ven et al., 2016</xref>), their physiological definition seems constrained (<xref ref-type="bibr" rid="bib10">Buzs&#225;ki and Schomburg, 2015</xref>). Moreover, the replay content and order unfold neuronal representations in a myriad of combinations in the service for cognitive agency and flexibility (<xref ref-type="bibr" rid="bib30">Joo and Frank, 2018</xref>; <xref ref-type="bibr" rid="bib54">Pfeiffer, 2020</xref>). The potentially different mechanisms underlying such a representational complexity are not yet integrated into the existing definition of SWR (<xref ref-type="bibr" rid="bib14">de la Prida, 2020</xref>).</p><p>When coupled to ultra-dense Neuropixels, our CNN identified subsets of SWR across different strata of the dorsal and ventral hippocampus. The ability to detect events across layers seemed to rely in a combination of features with the strength and visibility of the associated current sinks/sources having major contributions. This calls for the existence of different generators emerging from interaction between different input pathways and local microcircuits (<xref ref-type="bibr" rid="bib13">de la Prida et al., 2006</xref>). 
For instance, recent data suggest a pivotal role of entorhinal inputs in modulating and elongating the dynamics of locally generated SWRs (<xref ref-type="bibr" rid="bib20">Fern&#225;ndez-Ruiz et al., 2019</xref>; <xref ref-type="bibr" rid="bib49">Oliva et al., 2018</xref>; <xref ref-type="bibr" rid="bib71">Yamamoto and Tonegawa, 2017</xref>). Similarly, SWR events disproportionally weighted by downstream inputs along the CA3 to CA2 axis differentially modulate consolidation of recognition memory at the social and nonsocial domains (<xref ref-type="bibr" rid="bib43">Nakashiba et al., 2009</xref>; <xref ref-type="bibr" rid="bib50">Oliva et al., 2020</xref>; <xref ref-type="bibr" rid="bib48">Oliva et al., 2016</xref>). Consistently, we found that some ripples can be actually detected at SO, SR, and SLM strata independently of their alleged local generation at the CA1 cell layer.</p><p>The configuration of the current sinks and sources associated with independently detected SWR events suggests that the weighted interaction between fluctuating input pathways may entail contribution by different factors across behavioral states (<xref ref-type="bibr" rid="bib9">Buzs&#225;ki, 2015</xref>). For instance, different subcircuits may contribute to sleep and awake SWR with different cognitive roles (<xref ref-type="bibr" rid="bib59">Roumis and Frank, 2015</xref>). The ability to detect ripple oscillations at different layers also indicates a role for dendritic potentials, such as complex spikes and dendritic bursts (<xref ref-type="bibr" rid="bib6">Bittner et al., 2015</xref>; <xref ref-type="bibr" rid="bib32">Kamondi et al., 1998</xref>). 
Finally, while attention is traditionally focused on parvalbumin and cholecystokinin GABAergic basket cells providing perisomatic innervation (<xref ref-type="bibr" rid="bib36">Klausberger et al., 2005</xref>), other GABAergic cells and terminals located at the border between SR and SLM may equally contribute (<xref ref-type="bibr" rid="bib5">Basu et al., 2016</xref>; <xref ref-type="bibr" rid="bib35">Kitamura et al., 2015</xref>; <xref ref-type="bibr" rid="bib37">Klausberger and Somogyi, 2008</xref>). This is supported by larger current sources associated with SWR events detected at SLM layers, as we show here.</p><p>Our data suggest that only one part of the dorsal SWR dynamics can be explained locally, consistent with complex interaction along the septotemporal axis (<xref ref-type="bibr" rid="bib51">Patel et al., 2013</xref>). Instead, the CNN identifies different types of SWR events detected at distant strata, suggesting a major role of input pathways. A segregated role for dorsal and ventral SWR events suggests that brain-wide subcircuits inherit the different representational dynamics of a variety of replays (<xref ref-type="bibr" rid="bib63">Sosa et al., 2020</xref>). The detection unfolding of CNN thus permits an unbiased categorization without relying on more elusive spectral criteria. Critically, both the filter and RippleNET failed to capture SWR diversity across strata, further confirming the suitability of CNN to identify critical LFP features accompanying a wealth of events.</p><p>Our method also identified events beyond the individual expert ground truth. Careful examination of those False Positives reveals sharp waves associated with population firing without ripples, as well as other unclassified forms of activities. While we cannot discard noisy detection from a continuum of LFP activity, our categorization suggests that they may reflect processes underlying buildup of population events (<xref ref-type="bibr" rid="bib13">de la Prida et al., 2006</xref>). 
In addition, the ability of CA3 inputs to bring about gamma oscillations and multi-unit firing associated with sharp waves is already recognized (<xref ref-type="bibr" rid="bib67">Sullivan et al., 2011</xref>), and variability of the ripple power can be related with different cortical subnetworks (<xref ref-type="bibr" rid="bib33">Karimi Abadchi et al., 2020</xref>; <xref ref-type="bibr" rid="bib55">Ramirez-Villegas et al., 2015</xref>). Since the power spectral level operationally defines the detection of SWR, part of this microcircuit intrinsic variability may be escaping analysis when using spectral filters.</p><p>Understanding how the brain encodes for memory is challenging. Recent data suggest that replay emerging from SWR is more complex than originally thought (<xref ref-type="bibr" rid="bib30">Joo and Frank, 2018</xref>). Cell-type-specific subcircuits, operating over a variety of interneuronal classes and under the influence of different input pathways, provide mechanistic support for a wealth of SWR events (<xref ref-type="bibr" rid="bib14">de la Prida, 2020</xref>). Yet, SWR detected by gold-standard spectral methods fail to reflect the necessary statistical variance that allows for identifying specific trends. Relying on unbiased feature-based methods hopefully can change the game.</p></sec><sec id="s4" sec-type="materials|methods"><title>Materials and methods</title><sec id="s4-1"><title>Animals</title><p>All protocols and procedures were performed according to the Spanish legislation (RD 1201/2005 and L.32/2007) and the European Communities Council Directive 2003 (2003/65/CE). Experiments were approved by the Ethics Committee of the Instituto Cajal and the Spanish Research Council.</p><p>In this work, we used different mouse lines aimed to target different cell-type-specific populations for optogenetic and imaging experiments. 
Experiments included in this paper follow the principle of reduction, to minimize the number of animals used and this is the reason why we obtained data from different mouse lines. Animals and sessions used are summarized in <xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>. Animals were maintained in a 12 hr light-dark cycle (7 a.m. to 7 p.m.) with access to food and drink ad libitum.</p></sec><sec id="s4-2"><title>Head-fixed preparation</title><p>Mice were implanted with fixation bars under isoflurane (1.5&#8211;2%; 30% oxygen) anesthesia. Bars and ground/reference screws (over the cerebellum) were fixed with light-curing acrylic resins (OptiBond and UNIFAST LC). After surgery, mice were treated with buprenorphine for 2 days. For optogenetic experiments, mice from different promotor-specific Cre lines were previously injected with AAV5-DIO-EF1a-hChR2-EYFP (1 &#181;l; titer 4.5&#8201;&#215;&#8201;1012 vg/ml; provided by UNC Vector core, Deisseroth lab) targeting the dorsal CA1 region (&#8722;1.9 &#8201;mm AP; 1.25 mm&#8201;ML and 1 mm depth). Transgenic Thy1-ChR2-YFP and Thy1-GCaMP7 mice were directly implanted with fixation bars.</p><p>Two days after surgery, mice were habituated to head-fixed conditions (10&#8211;14 days of training). The apparatus consisted of a wheel (40 cm diameter) hosting different somatosensory cues and equipped with a Hall sensor (HAMLIN 55300; Littelfuse Inc) to track position analogically. Animals were water rewarded just after each training session (2&#8211;4 sessions &#8201;&#215;&#8201;day). After several days, mice were able to stay comfortable in the apparatus with periods of running, grooming, and immobility.</p><p>Once habituated, mice were anesthetized with isoflurane and a craniotomy was performed for electrophysiological recordings (antero-posterior: &#8722;3.9 to &#8722;6 mm from Bregma; medio-lateral: 2&#8211;5 mm). 
The craniotomy was sealed with Kwik-Cast silicone elastomer and mice returned to their home cage. Recording sessions started the day after craniotomy.</p></sec><sec id="s4-3"><title>Electrophysiological recordings</title><p>LFP recordings were obtained with integrated &#181;LED optoelectrodes (32 channels, 4 shanks of 8-channels, and 3 &#181;LED each) originally provided by Euisik Yoon under the NSF-funded NeuroNex project and later purchased from NeuroLight Technologies, LLC, N1-A0-036/18 and Plexon. Wideband (1 Hz&#8211;5 KHz) LFP signals were recorded at 30 KHz sampling rate with an RHD2000 Intan USB Board running under OE. Optoelectrode recordings targeted the dorsal CA1 region, using characteristic features such as the laminar profile of theta and SWRs, as well as unit activity to infer position within the hippocampus.</p><p>Ultra-dense recordings were obtained with Neuropixels 1.0 probes and acquired with the PXIe acquisition module mounted in the PXI-Express chassis (National Instruments). Neuropixels probes consist of up to 966 recording sites (70&#215;20 &#181;m) organized in a checkerboard pattern, from which 384 can be selected for recording. Recordings were made in external reference mode with LFP gain set at 250 and at 2500 Hz sampling rate, using the SpikeGLX software. The probe targeted the dorsal-to-ventral hippocampus at different anterior-to-posterior positions in four different mice (<xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>). To recover the penetrating track precisely, the back of the Neuropixels probe was coated with DiI (Invitrogen).</p><p>After completing experiments, mice were perfused with 4% paraformaldehyde and 15% saturated picric acid in 0.1 M (pH 7.4) phosphate-buffered saline (PBS). Brains were post-fixed overnight, washed in PBS, and serially cut in 70 &#181;m coronal sections (Leica VT 1000S vibratome). 
Sections containing the probe tracks were identified with a stereomicroscope (S8APO, Leica) and mounted on glass slides in Mowiol (17% polyvinyl alcohol 4&#8211;88, 33% glycerin, and 2% thimerosal in PBS).</p><p>Sections from Neuropixels recording were analyzed with SHARP-Track, a tool to localize regions going through electrode tracks (<xref ref-type="bibr" rid="bib60">Shamash et al., 2018</xref>) (<ext-link ext-link-type="uri" xlink:href="https://github.com/petersaj/AP_histology">https://github.com/petersaj/AP_histology</ext-link> <xref ref-type="bibr" rid="bib52">Peters, 2022</xref>). Acronyms in <xref ref-type="fig" rid="fig7">Figure 7</xref> correspond to the following: corpus callosum (cc); primary visual cortex (V1); stratum oriens (SO); stratum piramidale (SP); stratum radiatum (SR); stratum lacunosum moleculare (SLM); molecular layer dentate gyrus (ML); granular layer dentate gyrus (GCL); hilus (HIL); hippocampal fissure (fiss); basolateral amygdala (BLA); amygdalopiriform transition area (APir); lateral posterior medial rostral thalamus (LPMR); posterior thalamus (Po); ventro posterior medial thalamus (VPM); ventro posterior lateral thalamus (VPL); lemniscus (Lemn); ventro medial thalamus (VM); zona incerta, dorsal part (ZID); zona incerta, ventral part (ZIV).</p></sec><sec id="s4-4"><title>Neural network specifications</title><p>We used Python 3.7.9 with libraries Numpy 1.18.5, Scipy 1.5.4, Pandas 1.1.4, and H5Py 2.10.0 for programming different routines. To build, train, and test the network, we use the Tensorflow 2.3.1 library, with its built-in Keras 2.4.0 application programming interface (API). Training and offline validation of the CNN was performed over the Artemisa high-performance computing infrastructure (<ext-link ext-link-type="uri" xlink:href="https://artemisa.ific.uv.es/web/content/nvidia-tesla-volta-v100-sxm2">https://artemisa.ific.uv.es/web/content/nvidia-tesla-volta-v100-sxm2</ext-link>). 
It consisted of 23 machines equipped with four NVIDIA Volta V100 GPUs. Analyses were conducted on personal computers (Intel Xeon E3 v5 processor with 64 GB RAM and Ubuntu v.20.04).</p><p>The CNN architecture was designed as a sequence of blocks integrated by one one-dimensional Convolutional layer (<xref ref-type="bibr" rid="bib12">Cun et al., 1990</xref>) followed by one BatchNorm layer (<xref ref-type="bibr" rid="bib28">Ioffe and Szegedy, 2015</xref>), and one Leaky ReLU Activation layer (<xref ref-type="bibr" rid="bib38">Maas et al., 2013</xref>). There were seven of these blocks (21 layers) and a final Dense layer (<xref ref-type="bibr" rid="bib58">Rosenblatt, 1958</xref>) (layer 22) (<xref ref-type="fig" rid="fig1">Figure 1C</xref>).</p><p>One-dimensional Convolutional layers (tf.keras.layers.Conv1D) were in charge of processing data and looking for characteristic features. These layers have a determined number of kernels, which was defined in the parameter search. A kernel is a matrix of weights acting to apply a convolution operation over data. The result of this operation is known as the KA signal. A Convolutional layer generates as many KA as the number of kernels it has. BatchNorm layers (tf.keras.layers.BatchNormalization) perform a normalization of the Convolutional layer KA, fixing its means and variances and providing stability and robustness to the whole network. Leaky ReLU layer (tf.keras.layers.LeakyReLU) has a similar purpose to the BatchNorm layer, making the network more stable by transforming negative input values into numbers very close to 0. The final Dense layer (tf.keras.layers.Dense) was fit to the dimension of the output space (i.e. probability values).</p><p>BatchNorm layer parameters were all left as their default values defined in the Tensorflow 2.3.1 library. The Leaky ReLU layer alpha parameter was set to 0.1. 
For Convolutional layers, the kernel size and stride were set to the same value so that the network operates similarly offline and online. The kernel size and stride determined the duration of the input window, so they were tuned in order to fit either a 32 ms window (CNN32) or a 12.8 ms window (CNN12). Values of kernel size and stride for Convolutional layers 1, 4, 7, 10, 13, 16, and 19 of CNN32 were: 5, 1, 2, 1, 2, 1, 2, respectively. For CNN12, the values were 2, 1, 2, 1, 2, 1, and 2. Since max-pooling layers can be replaced by Convolutional layers with increased stride, we chose not to use max-pooling layers to avoid issues with the input window size (<xref ref-type="bibr" rid="bib64">Springenberg et al., 2014</xref>).</p><p>The number of kernels and kernel regularizers were selected after performing an initial parametric search (initial learning rate, number of kernels factor, and batch size; <xref ref-type="fig" rid="fig1s1">Figure 1&#8212;figure supplement 1E</xref>). For the Dense layer we used a sigmoid activation function operating over 1 unit to produce 1 channel output. All the other parameters for the Convolutional layers, as well as for the Dense layer, were set initially at default values. Additionally, we also tested whether adding two LSTM layers before the final Dense layer improved performance in the preliminary parameter tests.</p><p>We selected our CNN32 as that with the lower and more stable training evolution (see below) from the 10-best networks in the initial parameter search (out of 107; <xref ref-type="supplementary-material" rid="supp1">Supplementary file 1B</xref>). CNN32+LSTM networks exhibited similar performance, but took substantially more time for training. 
A more thorough parametric search was conducted over a larger set of parameters (initial learning rate, number of kernels factor, batch size, optimizer, optimizer epsilon, regularizer, regularizer value, and decay; <xref ref-type="fig" rid="fig1s1">Figure 1&#8212;figure supplement 1E</xref>) for both types of architectures. The initially selected CNN32 was among the 30-best networks of the extended parameter search, also exhibiting fast training and high loss evolution stability (out of 781). Based on parametric searches, we chose the Adam algorithm as the optimizer (<xref ref-type="bibr" rid="bib34">Kingma and Ba, 2015</xref>) with an initial learning rate of 0.001, beta_1=0.9, beta_2=0.999, and epsilon = 1e-07. Batch size was set at 16 and the number of kernels for Convolutional layers 1, 4, 7, 10, 13, 16, and 19 was set at 4, 2, 8, 4, 16, 8, and 32, respectively. A L2 regularization method (<xref ref-type="bibr" rid="bib68">Tikhonov and Arsenin, 1977</xref>) with a 0.0005 value was employed to avoid overfitting. No additional learning rate decay was used.</p></sec><sec id="s4-5"><title>Ground truth and data annotation</title><p>A MATLAB R2019b tool was designed to annotate and validate data by an expert electrophysiologist (original expert). All data was visually inspected and SWR events annotated. An important decision we made was to manually annotate the start and the end of SWR events so that the network could learn anticipating events in advance. The start of the event was defined near the first ripple or the sharp-wave onset. The end of the event was defined at the latest ripple or when sharp-wave resumed. While there was some level of ambiguity on these definitions, we opted for including these marks in order to facilitate transition to ground truth detection. 
An additional expert (new expert) tagged SWR independently using a subset of sessions from the offline validation and online pool, to allow for comparisons between experts in the same lab.</p></sec><sec id="s4-6"><title>Data preparation</title><p>Datasets used for training and development of the CNN were created by loading a number of experimental sessions and storing them in two different three-dimensional matrices, X and Y.</p><p>Matrix X stored several chunks of 8-channels LFP recordings. From each session, LFP data from all probe shanks displaying any SWR were loaded, unless specific shanks were selected. If a shank had more than 8 channels, then they were randomly selected while giving priority to those located at the SP of CA1. All LFP signals were down-sampled to 1250 Hz and normalized using z-score. LFP signals were sliced into chunks of 57.6 s, which is exactly divisible by 0.032 and 0.0128 s, in order to keep a consistent matrix shape even when various sessions of different durations were used. This chunk size maintains the properties of long duration signals, which is essential for the CNN to reach a high-performance score when fed with continuous data. Chunks with no SWR events would be discarded, but that was an extremely rare case. At the end of this process the result is a matrix X with dimension (n, 72,000, 8), where n is the number of chunks of 72,000 samples (57.6 s sampled at 1250 Hz) for each of the eight LFP channels.</p><p>Matrix Y contained the annotated labels for each temporal window (32 or 12.8 ms) stored in X. To create Y, each chunk was separated in windows of 32 or 12.8 ms and then assigned a label, a number between 0 and 1, depending on the percentage of the window occupied by a SWR event. 
Therefore, dimension of matrix Y was (n, 1800, 1) for CNN32, since there are 1800 32 ms windows in a chunk of 57.6 s, and (n, 4500, 1) for CNN12, with 4500 windows of size 12.8 ms for each chunk.</p><p>Finally, the whole dataset (both X and Y matrices) was separated into the training set, used to fit the model, and the development set, used to evaluate the performance of the trained model with different data than those used for training while still tuning the network hyper-parameters. Train set took 70% of the data and development set the remaining 30%.</p></sec><sec id="s4-7"><title>CNN training, development, and testing</title><p>Two sessions from two different mice were used as the training set (<xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>). Training was run for 3000 epochs using the binary cross-entropy as loss function:<disp-formula id="equ1"><mml:math id="m1"><mml:mrow><mml:msub><mml:mi mathvariant="normal">H</mml:mi><mml:mrow><mml:mi mathvariant="normal">p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi mathvariant="normal">q</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#8722;</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi mathvariant="normal">N</mml:mi></mml:mfrac><mml:munderover><mml:mo>&#8721;</mml:mo><mml:mrow><mml:mi mathvariant="normal">i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi mathvariant="normal">N</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi mathvariant="normal">y</mml:mi><mml:mrow><mml:mi mathvariant="normal">i</mml:mi></mml:mrow></mml:msub><mml:mo>&#8901;</mml:mo><mml:mi mathvariant="normal">l</mml:mi><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">g</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi mathvariant="normal">p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi mathvariant="normal">y</mml:mi><mml:mrow><mml:mi 
mathvariant="normal">i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#8722;</mml:mo><mml:msub><mml:mi mathvariant="normal">y</mml:mi><mml:mrow><mml:mi mathvariant="normal">i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#8901;</mml:mo><mml:mi mathvariant="normal">l</mml:mi><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">g</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#8722;</mml:mo><mml:mi mathvariant="normal">p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi mathvariant="normal">y</mml:mi><mml:mrow><mml:mi mathvariant="normal">i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:math></disp-formula></p><p>where N is the number of windows, y<sub>i</sub> is the label of window I, and p(y<sub>i</sub>) is the probability predicted for window i.</p><p>In order to evaluate the network performance, two different datasets were used: the training set described above, and the validation set, consisting of 15 sessions from five different animals that were not used for training or development (<xref ref-type="supplementary-material" rid="supp1">Supplementary file 1</xref>).</p><p>To detect SWR event, we set a probability threshold to identify windows with positive and negative predictions. 
Accordingly, predictions were classified in four categories: True Positive (TP) when the prediction was positive and the ground truth window did contain an SWR event; False Positive (FP) when the prediction was positive in a window that did not contain any SWR; False Negative (FN) when the prediction is negative but the window contained a SWR; and True Negative (TN) when the prediction was negative and the window did not contain any SWR event.</p><p>Intersection over Union (IOU) methodology was employed to classify predictions into those four categories. It was calculated by dividing the intersection (overlapping) of two windows by the union of them:<disp-formula id="equ2"><mml:math id="m2"><mml:mi>I</mml:mi><mml:mi>o</mml:mi><mml:mi>U</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>w</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mi>o</mml:mi><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#8745;</mml:mo><mml:msub><mml:mrow><mml:mi>w</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mi>o</mml:mi><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>w</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mi>o</mml:mi><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#8746;</mml:mo><mml:msub><mml:mrow><mml:mi>w</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mi>o</mml:mi><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:math></disp-formula></p><p>Two windows were considered to match if their IOU was equal to or greater than 0.1. If a positive prediction had a match with any window containing a ripple it was considered a TP, or it was classified as FP otherwise. All true events that did not have any matching positive prediction were considered FN. 
Negative predictions with no matching true events windows were TN.</p><p>With predicted and true events classified into those four categories there are three measures that can be used to evaluate the performance of the model. Precision (P), which was computed as the total number of TPs divided by TPs and FPs, represents the percentage of predictions that were correct.<disp-formula id="equ3"><mml:math id="m3"><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula></p><p>Recall (R), which was calculated as TPs divided by TPs and FNs, represents the percentage of true events that were correctly predicted.<disp-formula id="equ4"><mml:math id="m4"><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula></p><p>Finally, the F1 score, calculated as the harmonic mean of Precision and Recall, represents the network performance.<disp-formula id="equ5"><mml:math id="m5"><mml:mi>F</mml:mi><mml:mn>1</mml:mn><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mi>*</mml:mi><mml:mfenced 
separators="|"><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mi>*</mml:mi><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:mfenced></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula></p><p>As mentioned before, a prediction was considered positive when its probability surpassed a specified threshold. During offline detection, a first threshold was used to indicate the potential onset of the SWR event, followed by a second confirmatory higher threshold, which identifies the event itself. In order to select the best thresholds for offline validation, all combinations were compared and the one that yielded the best F1 score was chosen. Possible values for the first threshold were 0.80, 0.75, 0.70, 0.65, 0.60, 0.55, 0.50, 0.45, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, and 0.10, while for the second threshold were 0.80, 0.70, 0.60, 0.50, 0.40, 0.30, 0.20, and 0.10. Note that only the higher threshold is the one that defines detection, which is reported in figures. For online detection, only one threshold was used and it was adjusted manually at the beginning of the experiment based on the expert criteria.</p><p>To estimate delay between prediction and SWR events, the temporal relation between correct predictions and their matching true events was measured. SWR ripple peaks were defined after filtering the relevant LFP channel using a third-order Butterworth filter and an enlarged bandpass between 70 and 400 Hz. 
The resulting signal was subsequently filtered with a fourth-order Savitzky-Golay filter and smoothed twice with windows of 3 and 6.5 ms to obtain the SWR envelope. The maximal value of the envelope signal was defined as the SWR ripple peak. The interval between the initial prediction time and the SWR ripple peak was defined as the time to peak.</p><p>The trained model is accessible at the Github repository for both Python: <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/cnn-ripple">https://github.com/PridaLab/cnn-ripple</ext-link>, (copy archived at <ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:b38a5db56c84c61821347b603dd884169d8f7b1c;origin=https://github.com/PridaLab/cnn-ripple;visit=swh:1:snp:5785d27d319d84076a2353540d821d26346be009;anchor=swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b">swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b</ext-link>; <xref ref-type="bibr" rid="bib1">Amaducci and Navas-Olive, 2021</xref>) and MATLAB: <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/cnn-matlab">https://github.com/PridaLab/cnn-matlab</ext-link> (copy archived at <ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:0e369cb7c13f28d016e1b55a1f3e0242bc91ec91;origin=https://github.com/PridaLab/cnn-matlab;visit=swh:1:snp:0eef36d3eec9e00833377db989809596bda847ac;anchor=swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33">swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33</ext-link>; <xref ref-type="bibr" rid="bib45">Navas-Olive and Esparza, 2022</xref>). 
Code visualization and detection is shown in an interactive notebook <ext-link ext-link-type="uri" xlink:href="https://colab.research.google.com/github/PridaLab/cnn-ripple/blob/main/src/notebooks/cnn-example.ipynb">https://colab.research.google.com/github/PridaLab/cnn-ripple/blob/main/src/notebooks/cnn-example.ipynb</ext-link>.</p></sec><sec id="s4-8"><title>Offline detection of SWR events with Butterworth filters</title><p>Standard ripple detection tools are based on spectral filters. In order to compare online and offline performance, we adopted the OE bandpass (100&#8211;300 Hz passband) second-order Butterworth filter as the gold standard. We confirmed that the chosen filter parameters provided optimal performance when tested with the same training set used for the CNN. Offline filter detection was computed in MATLAB R2019b, using the <italic>Butterworth</italic> filter (100&#8211;300 Hz passband) and a non-causal filter <italic>filtfilt</italic> to avoid phase lags. In order to compute the envelope, the filtered signal was amplified twice, filtered by a fourth-order Savitzky-Golay filter, and then smoothed by two consecutive <italic>movmean</italic> sliding windows (2.3 and 6.7 ms). A detection had to fulfill two conditions: the envelope had to surpass a first threshold, which will define the ripple beginning and end, and a second threshold to be considered a detection. Detections closer than 15 ms were merged. Performance of ripple detection methods is very sensitive to the chosen threshold. To look for the fairest comparison, we made predictions for all possible combinations of the first threshold being 1, 1.5, 2, 2.5 times the envelope standard deviation, and the second threshold being 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10 times the envelope standard deviation (giving a total of 60 threshold combinations). We then chose the one that scored the maximum F1. 
This was done separately for each session.</p><p>Online detections were defined whenever the filtered signal was above a single threshold (see next section). To exclude artifacts and to cope with detection standards (<xref ref-type="bibr" rid="bib20">Fern&#225;ndez-Ruiz et al., 2019</xref>), an additional non-ripple channel was used to veto high-frequency noise detections.</p></sec><sec id="s4-9"><title>Characterization of SWRs</title><p>Ripple properties were computed using a 100 ms window around the center of the event of the pyramidal channel of the raw LFP. Preferred frequency was computed first by calculating the power spectrum of the 100 ms interval using the enlarged bandpass filter 70 and 400 Hz, and then looking for the frequency of the maximum power. In order to account for the exponential power decay in higher frequencies, we subtracted a fitted exponential curve (&#8216;fitnlm&#8217; from MATLAB toolbox) before looking for the preferred frequency. The &lt;100 Hz contribution was computed as the sum of the power values for all frequencies lower than 100 Hz normalized by the sum of all power values for all frequencies (of note, no subtraction was applied to this power spectrum). 
The entropy was computed using a normalized power spectrum (divided by the sum of all power values along all frequencies) as:<disp-formula id="equ6"><mml:math id="m6"><mml:mi>E</mml:mi><mml:mi>n</mml:mi><mml:mi>t</mml:mi><mml:mi>r</mml:mi><mml:mi>o</mml:mi><mml:mi>p</mml:mi><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mo>-</mml:mo><mml:mrow><mml:mo>&#8721;</mml:mo><mml:mrow><mml:mi>P</mml:mi><mml:mi>o</mml:mi><mml:mi>w</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi></mml:mrow></mml:mrow><mml:mfenced separators="|"><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:mfenced><mml:mo>&#8901;</mml:mo><mml:msub><mml:mrow><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mfenced separators="|"><mml:mrow><mml:mi>P</mml:mi><mml:mi>o</mml:mi><mml:mi>w</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mfenced separators="|"><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:mfenced></mml:mrow></mml:mfenced></mml:math></disp-formula></p></sec><sec id="s4-10"><title>OE custom plugins for online detection</title><p>Two custom plugins were developed using OE GUI 0.4.6 software in a personal computer. The first plugin was designed to detect when a signal crossed a determined amplitude threshold, defined as the signal standard deviation multiplied by some number. It was used in combination with the Bandpass Filter plugin, which implements a Butterworth filter, so the input for the crossing detector was a filtered signal. In order to avoid artifacts, we use a second input channel from a separate region defined by the experimenter. Events detected in both channels were discarded.</p><p>The second plugin was developed to operate the CNN and it used the Tensorflow 2.3.0 API for C (<ext-link ext-link-type="uri" xlink:href="https://www.tensorflow.org/install/lang_c">https://www.tensorflow.org/install/lang_c</ext-link>). Since the network was trained to work with data sampled at 1250 Hz, the plugin down-sampled the input channels. 
It also separated data into windows of 12.8 ms and 8-channels to feed into the CNN every 6.4 ms. Detection threshold was defined as a probability between 0 and 1, and it was manually adjusted by the experimenter.</p><p>Both plugins normalized the input data using z-score normalization. They require a short calibration time (about 1 min) to calculate the mean and standard deviation of the signals. The user could establish the detection thresholds and when an event is found a signal is sent through a selected output channel. For OE simulated experiments we used the same setup but the data was read from a file instead of streamed directly from the experiment. Events in simulated experiments were detected similarly as real-time experiments. Detection plugin: <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/CNNRippleDetectorOEPlugin">https://github.com/PridaLab/CNNRippleDetectorOEPlugin</ext-link> (copy archived at <ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:51e0c5075544575e708941e122e108bd7f719b4e;origin=https://github.com/PridaLab/CNNRippleDetectorOEPlugin;visit=swh:1:snp:02c88c2080e1aadd30d7938c16bd3043e65684f2;anchor=swh:1:rev:52b182d1fba732a0bc3ad69ce9453c6fe96ae190">swh:1:rev:52b182d1fba732a0bc3ad69ce9453c6fe96ae190</ext-link>; <xref ref-type="bibr" rid="bib17">Esparza, 2022</xref>).</p></sec><sec id="s4-11"><title>Closed-loop optogenetic experiments</title><p>For closed-loop experiments, the output channel from the OE plugin was fed into an Arduino board (Nano ATmega328) using an USB 3.0 connection. Optogenetic stimulation was performed with integrated &#181;LED optoelectrodes using the OSC1Lite driver from NeuroLight Technologies controlled by the Arduino. Microwatt blue light stimulation at 10&#8211;20 &#181;W was used to activate cell-type-specific ChR2. 
Specificity of viral expression and localization of probe tracks were histologically assessed after experiments.</p></sec><sec id="s4-12"><title>Computing the kernel saliency maps</title><p>During training, kernels weights are updated so each of them specializes in detecting a particular feature of SWR input data. In order to interpret these features, we adapted a methodology used in two-dimensional CNNs for image processing (<xref ref-type="bibr" rid="bib62">Simonyan et al., 2013</xref>). First, we created an input 8&#215;40 LFP signal with random values (standard normal distribution, with 0 mean and 0.01 standard deviation). Then, for each kernel we updated this input signal applying a stochastic gradient optimizer (tf.keras.optimizers.SGD) with a learning rate of 0.1, momentum 0.1, and a loss function equal to minus the normalized KA that produced such input (therefore making it gradient ascent). We repeated this optimization process until the mean squared error between the previous input and the optimized input was less than 10<sup>&#8211;9</sup>, or after 2000 iterations, whatever came first. The resulting input signal would be one that the chosen kernel is maximally responsive to. Code example for this process applied to the ResNet50V2 model of the ImageNet dataset (<ext-link ext-link-type="uri" xlink:href="https://www.image-net.org/">https://www.image-net.org/</ext-link>) can be found at the Keras documentation: <ext-link ext-link-type="uri" xlink:href="https://keras.io/examples/vision/visualizing_what_convnets_learn/">https://keras.io/examples/vision/visualizing_what_convnets_learn/</ext-link>.</p></sec><sec id="s4-13"><title>Uniform Manifold Approximation and Projection</title><p>UMAP is a dimensional reduction technique commonly used for visualization of multi-dimensional data in a two- or three-dimensional embedding (<xref ref-type="bibr" rid="bib40">McInnes et al., 2018</xref>). 
The embedding is found by searching a low dimensional projection with the closest equivalent fuzzy topological structure to that of the hyper-dimensional input data. We run UMAP version 0.5.1 (<ext-link ext-link-type="uri" xlink:href="https://umap-learn.readthedocs.io/en/latest/">https://umap-learn.readthedocs.io/en/latest/</ext-link>) in Python 3.7 Anaconda.</p><p>We applied UMAP to decode CNN operation using the network feature maps in response to a diversity of LFP inputs. Feature maps were built by concatenating the resulting KA from all the Convolutional layers resulting in a 1329-dimensional vector (CNN32; 3991 for CNN12). The goal was to compute the reduced two-dimensional UMAP embedding from a large number of LFP events.</p><p>We computed the UMAP embedding of the feature map of the CNN using 7491 SWR events and 7491 random events and projected them in a color scale reflecting the different labels. Two-dimensional UMAP embeddings were evaluated for different parameter combinations of the number of neighbors and the minimal distance. After noticing no major differences, we choose their default values.</p></sec><sec id="s4-14"><title>Pattern matching</title><p>Pattern matching between saliency maps from the different kernels and the LFP windows was computed using <italic>matchTemplate</italic> from CV2 package (version 4.5.1), OpenCV library for python, with the <italic>TM_CCORR</italic> template matching operation. It slides a template (saliency map) along the whole signal (LFP window) and outputs a measure on their similarity for each slide. LFP windows provided were 100 ms 8-channel (8&#215;125) z-scored windows around all true positive events, same number of true negative events, and all true positive and false positive events for both the training and validations sets. 
Windows were centered on the LFP minimum of the pyramidal channel closest to the maximum of the SWR envelope within a 10 ms window (envelope computed as described above).</p></sec><sec id="s4-15"><title>Simulated penetrations along Neuropixels probe</title><p>Simulated penetrations were obtained choosing Neuropixel electrodes with a relative distance similar to the &#956;LED optoelectrode probe. To this purpose, we chose Neuropixel external electrodes (64 &#956;m horizontal separation versus 70 &#956;m for the &#956;LED probe), alternating left and right for each row, so the vertical distance was 20 &#956;m (same as in &#956;LED probes). Therefore, a simulated penetration always consisted of eight neighboring electrodes (e.g. [1 4 5 8 9 12 13 16]). To evaluate changes across layers and regions, the simulated penetration was moved all along the Neuropixels probe in 93 steps (downward/upward) thus providing a continuous mapping of LFP signals. For example, the following penetration sequence spanned along the brain: [1 4 5 8 9 12 13 16], [5 8 9 12 13 16 17 20], [9 12 13 16 17 20 21 24], and so on.</p><p>For CSD analysis we proceeded similarly, but choosing LFP channels every 100 &#181;m to mimic a 16-channel silicon probe. CSD signals were calculated from the second spatial derivative. Smoothing was applied to CSD signals for visualization purposes only. Tissue conductivity was considered isotropic across layers.</p></sec><sec id="s4-16"><title>Quantification and statistical analysis</title><p>Statistical analysis was performed with Python 3.8.5 and/or MATLAB R2019b. No statistical method was used to predetermine sample sizes, which were similar to those reported elsewhere. Normality and homoscedasticity were confirmed with the Kolmogorov-Smirnov and Levene&#8217;s tests, respectively. The number of replications is detailed in the text and figures.</p><p>Several ways ANOVAs were applied for group analysis. 
Post hoc comparisons were evaluated with the Tukey-Kramer test and whenever required Bonferroni correction was applied. For paired comparisons the Student&#8217;s t-test was used. Correlation between variables was evaluated with the Pearson product-moment correlation coefficient, which was tested against 0 (i.e. no correlation was the null hypothesis) at p&lt;0.05 (two sided). In most cases values were z-scored (value &#8211; mean divided by the SD) to make data comparable between animals or across layers.</p></sec></sec></body><back><sec sec-type="additional-information" id="s5"><title>Additional information</title><fn-group content-type="competing-interest"><title>Competing interests</title><fn fn-type="COI-statement" id="conf1"><p>No competing interests declared</p></fn><fn fn-type="COI-statement" id="conf2"><p>No competing interests declared</p></fn><fn fn-type="COI-statement" id="conf3"><p>Reviewing editor, <italic>eLife</italic></p></fn></fn-group><fn-group content-type="author-contribution"><title>Author contributions</title><fn fn-type="con" id="con1"><p>Conceptualization, Software, Formal analysis, Investigation, Visualization, Methodology, Writing &#8211; review and editing</p></fn><fn fn-type="con" id="con2"><p>Conceptualization, Software, Formal analysis, Investigation, Visualization, Methodology, Writing &#8211; review and editing</p></fn><fn fn-type="con" id="con3"><p>Data curation, Validation, Investigation, Writing &#8211; review and editing</p></fn><fn fn-type="con" id="con4"><p>Software, Formal analysis</p></fn><fn fn-type="con" id="con5"><p>Conceptualization, Supervision, Funding acquisition, Validation, Investigation, Writing - original draft, Project administration, Writing &#8211; review and editing</p></fn></fn-group><fn-group content-type="ethics-information"><title>Ethics</title><fn fn-type="other"><p>All protocols and procedures were performed according to the Spanish legislation (R.D. 
1201/2005 and L.32/2007) and the European Communities Council Directive 2003 (2003/65/CE). Experiments and procedures were approved by the Ethics Committee of the Instituto Cajal and the Spanish Research Council (PROEX131-16 and PROEX161-19). All surgical procedures were performed under isoflurane anesthesia and every effort was made to minimize suffering.</p></fn></fn-group></sec><sec sec-type="supplementary-material" id="s6"><title>Additional files</title><supplementary-material id="supp1"><label>Supplementary file 1.</label><caption><title>Sessions and animals used for the different analysis.</title></caption><media xlink:href="elife-77772-supp1-v2.docx" mimetype="application" mime-subtype="docx"/></supplementary-material><supplementary-material id="transrepform"><label>Transparent reporting form</label><media xlink:href="elife-77772-transrepform1-v2.docx" mimetype="application" mime-subtype="docx"/></supplementary-material></sec><sec sec-type="data-availability" id="s7"><title>Data availability</title><p>Data is deposited in the Figshare repository <ext-link ext-link-type="uri" xlink:href="https://figshare.com/projects/cnn-ripple-data/117897">https://figshare.com/projects/cnn-ripple-data/117897</ext-link>. 
The trained model is accessible at the Github repository for both Python: <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/cnn-ripple">https://github.com/PridaLab/cnn-ripple</ext-link> (copy archived at <ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:b38a5db56c84c61821347b603dd884169d8f7b1c;origin=https://github.com/PridaLab/cnn-ripple;visit=swh:1:snp:5785d27d319d84076a2353540d821d26346be009;anchor=swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b">swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b</ext-link>) and Matlab: <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/cnn-matlab">https://github.com/PridaLab/cnn-matlab</ext-link> (copy archived at <ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:0e369cb7c13f28d016e1b55a1f3e0242bc91ec91;origin=https://github.com/PridaLab/cnn-matlab;visit=swh:1:snp:0eef36d3eec9e00833377db989809596bda847ac;anchor=swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33">swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33</ext-link>). Code visualization and detection is shown in an interactive notebook <ext-link ext-link-type="uri" xlink:href="https://colab.research.google.com/github/PridaLab/cnn-ripple/blob/main/src/notebooks/cnn-example.ipynb">https://colab.research.google.com/github/PridaLab/cnn-ripple/blob/main/src/notebooks/cnn-example.ipynb</ext-link>. 
The online detection Open Ephys plugin is accessible at the Github repository: <ext-link ext-link-type="uri" xlink:href="https://github.com/PridaLab/CNNRippleDetectorOEPlugin">https://github.com/PridaLab/CNNRippleDetectorOEPlugin</ext-link> (copy archived at <ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:51e0c5075544575e708941e122e108bd7f719b4e;origin=https://github.com/PridaLab/CNNRippleDetectorOEPlugin;visit=swh:1:snp:02c88c2080e1aadd30d7938c16bd3043e65684f2;anchor=swh:1:rev:52b182d1fba732a0bc3ad69ce9453c6fe96ae190">swh:1:rev:52b182d1fba732a0bc3ad69ce9453c6fe96ae190</ext-link>).</p></sec><ack id="ack"><title>Acknowledgements</title><p>This work is supported by grants from Fundaci&#243;n La Caixa (LCF/PR/HR21/52410030; DeepCode). Access to the Artemisa high-performance computing infrastructure (NeuroConvo project) is supported by Universidad de Valencia and co-funded by the European Union through the 2014&#8211;2020 FEDER Operative Programme (IDIFEDER/2018/048). ANO and RA are supported by PhD fellowships from the Spanish Ministry of Education (FPU17/03268) and Universidad Aut&#243;noma de Madrid (FPI-UAM-2017), respectively. We thank Elena Cid for help with histological confirmation of the probe tracks and Pablo Varona for feedback and discussion. 
We also thank Aar&#243;n Cuevas for clarifications and support while developing the Open Ephys Plugin for online detection.</p></ack><ref-list><title>References</title><ref id="bib1"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Amaducci</surname><given-names>R</given-names></name><name><surname>Navas-Olive</surname><given-names>A</given-names></name></person-group><year iso-8601-date="2021">2021</year><data-title>Cnn-ripple</data-title><version designator="swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b">swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b</version><source>Software Heritage</source><ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:b38a5db56c84c61821347b603dd884169d8f7b1c;origin=https://github.com/PridaLab/cnn-ripple;visit=swh:1:snp:5785d27d319d84076a2353540d821d26346be009;anchor=swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b">https://archive.softwareheritage.org/swh:1:dir:b38a5db56c84c61821347b603dd884169d8f7b1c;origin=https://github.com/PridaLab/cnn-ripple;visit=swh:1:snp:5785d27d319d84076a2353540d821d26346be009;anchor=swh:1:rev:9dcc5b6a8267b89eb86a2813dbbcb74a621a701b</ext-link></element-citation></ref><ref id="bib2"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Atallah</surname><given-names>BV</given-names></name><name><surname>Scanziani</surname><given-names>M</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Instantaneous modulation of gamma oscillation frequency by balancing excitation with inhibition</article-title><source>Neuron</source><volume>62</volume><fpage>566</fpage><lpage>577</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2009.04.027</pub-id><pub-id pub-id-type="pmid">19477157</pub-id></element-citation></ref><ref id="bib3"><element-citation publication-type="preprint"><person-group 
person-group-type="author"><name><surname>Bai</surname><given-names>S</given-names></name><name><surname>Kolter</surname><given-names>JZ</given-names></name><name><surname>Koltun</surname><given-names>V</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1803.01271">https://arxiv.org/abs/1803.01271</ext-link></element-citation></ref><ref id="bib4"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bartos</surname><given-names>M</given-names></name><name><surname>Vida</surname><given-names>I</given-names></name><name><surname>Jonas</surname><given-names>P</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>Synaptic mechanisms of synchronized gamma oscillations in inhibitory interneuron networks</article-title><source>Nature Reviews. 
Neuroscience</source><volume>8</volume><fpage>45</fpage><lpage>56</lpage><pub-id pub-id-type="doi">10.1038/nrn2044</pub-id><pub-id pub-id-type="pmid">17180162</pub-id></element-citation></ref><ref id="bib5"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Basu</surname><given-names>J</given-names></name><name><surname>Zaremba</surname><given-names>JD</given-names></name><name><surname>Cheung</surname><given-names>SK</given-names></name><name><surname>Hitti</surname><given-names>FL</given-names></name><name><surname>Zemelman</surname><given-names>BV</given-names></name><name><surname>Losonczy</surname><given-names>A</given-names></name><name><surname>Siegelbaum</surname><given-names>SA</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Gating of hippocampal activity, plasticity, and memory by entorhinal cortex long-range inhibition</article-title><source>Science</source><volume>351</volume><elocation-id>aaa5694</elocation-id><pub-id pub-id-type="doi">10.1126/science.aaa5694</pub-id><pub-id pub-id-type="pmid">26744409</pub-id></element-citation></ref><ref id="bib6"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bittner</surname><given-names>KC</given-names></name><name><surname>Grienberger</surname><given-names>C</given-names></name><name><surname>Vaidya</surname><given-names>SP</given-names></name><name><surname>Milstein</surname><given-names>AD</given-names></name><name><surname>Macklin</surname><given-names>JJ</given-names></name><name><surname>Suh</surname><given-names>J</given-names></name><name><surname>Tonegawa</surname><given-names>S</given-names></name><name><surname>Magee</surname><given-names>JC</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Conjunctive input processing drives feature selectivity in hippocampal CA1 neurons</article-title><source>Nature 
Neuroscience</source><volume>18</volume><fpage>1133</fpage><lpage>1142</lpage><pub-id pub-id-type="doi">10.1038/nn.4062</pub-id><pub-id pub-id-type="pmid">26167906</pub-id></element-citation></ref><ref id="bib7"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name><name><surname>Draguhn</surname><given-names>A</given-names></name></person-group><year iso-8601-date="2004">2004</year><article-title>Neuronal oscillations in cortical networks</article-title><source>Science</source><volume>304</volume><fpage>1926</fpage><lpage>1929</lpage><pub-id pub-id-type="doi">10.1126/science.1099745</pub-id><pub-id pub-id-type="pmid">15218136</pub-id></element-citation></ref><ref id="bib8"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name><name><surname>Anastassiou</surname><given-names>CA</given-names></name><name><surname>Koch</surname><given-names>C</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>The origin of extracellular fields and currents--EEG, ecog, LFP and spikes</article-title><source>Nature Reviews. 
Neuroscience</source><volume>13</volume><fpage>407</fpage><lpage>420</lpage><pub-id pub-id-type="doi">10.1038/nrn3241</pub-id><pub-id pub-id-type="pmid">22595786</pub-id></element-citation></ref><ref id="bib9"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Hippocampal sharp wave-ripple: A cognitive biomarker for episodic memory and planning</article-title><source>Hippocampus</source><volume>25</volume><fpage>1073</fpage><lpage>1188</lpage><pub-id pub-id-type="doi">10.1002/hipo.22488</pub-id><pub-id pub-id-type="pmid">26135716</pub-id></element-citation></ref><ref id="bib10"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name><name><surname>Schomburg</surname><given-names>EW</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>What does gamma coherence tell us about inter-regional neural communication?</article-title><source>Nature Neuroscience</source><volume>18</volume><fpage>484</fpage><lpage>489</lpage><pub-id pub-id-type="doi">10.1038/nn.3952</pub-id><pub-id pub-id-type="pmid">25706474</pub-id></element-citation></ref><ref id="bib11"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cohen</surname><given-names>MX</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Where does EEG come from and what does it mean?</article-title><source>Trends in Neurosciences</source><volume>40</volume><fpage>208</fpage><lpage>218</lpage><pub-id pub-id-type="doi">10.1016/j.tins.2017.02.004</pub-id><pub-id pub-id-type="pmid">28314445</pub-id></element-citation></ref><ref id="bib12"><element-citation publication-type="confproc"><person-group 
person-group-type="author"><name><surname>Cun</surname><given-names>L</given-names></name><name><surname>Boser</surname><given-names>B</given-names></name><name><surname>Denker</surname><given-names>JS</given-names></name><name><surname>Henderson</surname><given-names>D</given-names></name><name><surname>Howard</surname><given-names>RE</given-names></name><name><surname>Hubbard</surname><given-names>W</given-names></name><name><surname>Jackel</surname><given-names>LD</given-names></name></person-group><year iso-8601-date="1990">1990</year><article-title>Handwritten digit recognition with a back-propagation network</article-title><conf-name>Neural Information Processing Systems</conf-name><fpage>396</fpage><lpage>404</lpage></element-citation></ref><ref id="bib13"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>de la Prida</surname><given-names>LM</given-names></name><name><surname>Huberfeld</surname><given-names>G</given-names></name><name><surname>Cohen</surname><given-names>I</given-names></name><name><surname>Miles</surname><given-names>R</given-names></name></person-group><year iso-8601-date="2006">2006</year><article-title>Threshold behavior in the initiation of hippocampal population bursts</article-title><source>Neuron</source><volume>49</volume><fpage>131</fpage><lpage>142</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2005.10.034</pub-id><pub-id pub-id-type="pmid">16387645</pub-id></element-citation></ref><ref id="bib14"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>de la Prida</surname><given-names>LM</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Potential factors influencing replay across CA1 during sharp-wave ripples</article-title><source>Philosophical Transactions of the Royal Society of London. 
Series B, Biological Sciences</source><volume>375</volume><elocation-id>20190236</elocation-id><pub-id pub-id-type="doi">10.1098/rstb.2019.0236</pub-id><pub-id pub-id-type="pmid">32248778</pub-id></element-citation></ref><ref id="bib15"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Diba</surname><given-names>K</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>Forward and reverse hippocampal place-cell sequences during ripples</article-title><source>Nature Neuroscience</source><volume>10</volume><fpage>1241</fpage><lpage>1242</lpage><pub-id pub-id-type="doi">10.1038/nn1961</pub-id><pub-id pub-id-type="pmid">17828259</pub-id></element-citation></ref><ref id="bib16"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dutta</surname><given-names>S</given-names></name><name><surname>Ackermann</surname><given-names>E</given-names></name><name><surname>Kemere</surname><given-names>C</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Analysis of an open source, closed-loop, realtime system for hippocampal sharp-wave ripple disruption</article-title><source>Journal of Neural Engineering</source><volume>16</volume><elocation-id>016009</elocation-id><pub-id pub-id-type="doi">10.1088/1741-2552/aae90e</pub-id><pub-id pub-id-type="pmid">30507556</pub-id></element-citation></ref><ref id="bib17"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Esparza</surname><given-names>J</given-names></name></person-group><year iso-8601-date="2022">2022</year><data-title>CNN-ripple plugin for open ephys</data-title><version designator="swh:1:rev:52b182d1fba732a0bc3ad69ce9453c6fe96ae190">swh:1:rev:52b182d1fba732a0bc3ad69ce9453c6fe96ae190</version><source>Software Heritage</source><ext-link 
ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:51e0c5075544575e708941e122e108bd7f719b4e;origin=https://github.com/PridaLab/CNNRippleDetectorOEPlugin;visit=swh:1:snp:02c88c2080e1aadd30d7938c16bd3043e65684f2;anchor=swh:1:rev:52b182d1fba732a0bc3ad69ce9453c6fe96ae190">https://archive.softwareheritage.org/swh:1:dir:51e0c5075544575e708941e122e108bd7f719b4e;origin=https://github.com/PridaLab/CNNRippleDetectorOEPlugin;visit=swh:1:snp:02c88c2080e1aadd30d7938c16bd3043e65684f2;anchor=swh:1:rev:52b182d1fba732a0bc3ad69ce9453c6fe96ae190</ext-link></element-citation></ref><ref id="bib18"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Farooq</surname><given-names>U</given-names></name><name><surname>Dragoi</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Emergence of preconfigured and plastic time-compressed sequences in early postnatal development</article-title><source>Science</source><volume>363</volume><fpage>168</fpage><lpage>173</lpage><pub-id pub-id-type="doi">10.1126/science.aav0502</pub-id><pub-id pub-id-type="pmid">30630930</pub-id></element-citation></ref><ref id="bib19"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fern&#225;ndez</surname><given-names>G</given-names></name><name><surname>Effern</surname><given-names>A</given-names></name><name><surname>Grunwald</surname><given-names>T</given-names></name><name><surname>Pezer</surname><given-names>N</given-names></name><name><surname>Lehnertz</surname><given-names>K</given-names></name><name><surname>D&#252;mpelmann</surname><given-names>M</given-names></name><name><surname>Van Roost</surname><given-names>D</given-names></name><name><surname>Elger</surname><given-names>CE</given-names></name></person-group><year iso-8601-date="1999">1999</year><article-title>Real-time tracking of memory formation in the human rhinal 
cortex and hippocampus</article-title><source>Science</source><volume>285</volume><fpage>1582</fpage><lpage>1585</lpage><pub-id pub-id-type="doi">10.1126/science.285.5433.1582</pub-id><pub-id pub-id-type="pmid">10477525</pub-id></element-citation></ref><ref id="bib20"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fern&#225;ndez-Ruiz</surname><given-names>A</given-names></name><name><surname>Oliva</surname><given-names>A</given-names></name><name><surname>Fermino de Oliveira</surname><given-names>E</given-names></name><name><surname>Rocha-Almeida</surname><given-names>F</given-names></name><name><surname>Tingley</surname><given-names>D</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Long-duration hippocampal sharp wave ripples improve memory</article-title><source>Science</source><volume>364</volume><fpage>1082</fpage><lpage>1086</lpage><pub-id pub-id-type="doi">10.1126/science.aax0758</pub-id><pub-id pub-id-type="pmid">31197012</pub-id></element-citation></ref><ref id="bib21"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Foster</surname><given-names>DJ</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Replay comes of age</article-title><source>Annual Review of Neuroscience</source><volume>40</volume><fpage>581</fpage><lpage>602</lpage><pub-id pub-id-type="doi">10.1146/annurev-neuro-072116-031538</pub-id><pub-id pub-id-type="pmid">28772098</pub-id></element-citation></ref><ref id="bib22"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Frey</surname><given-names>M</given-names></name><name><surname>Tanni</surname><given-names>S</given-names></name><name><surname>Perrodin</surname><given-names>C</given-names></name><name><surname>O&#8217;Leary</surname><given-names>A</given-names></name><name><surname>Nau</surname><given-names>M</given-names></name><name><surname>Kelly</surname><given-names>J</given-names></name><name><surname>Banino</surname><given-names>A</given-names></name><name><surname>Bendor</surname><given-names>D</given-names></name><name><surname>Lefort</surname><given-names>J</given-names></name><name><surname>Doeller</surname><given-names>CF</given-names></name><name><surname>Barry</surname><given-names>C</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>Interpreting wide-band neural activity using convolutional neural networks</article-title><source>eLife</source><volume>10</volume><elocation-id>e66551</elocation-id><pub-id pub-id-type="doi">10.7554/eLife.66551</pub-id><pub-id pub-id-type="pmid">34338632</pub-id></element-citation></ref><ref id="bib23"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Friston</surname><given-names>KJ</given-names></name><name><surname>Bastos</surname><given-names>AM</given-names></name><name><surname>Pinotsis</surname><given-names>D</given-names></name><name><surname>Litvak</surname><given-names>V</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>LFP and oscillations-what do they tell us?</article-title><source>Current Opinion in Neurobiology</source><volume>31</volume><fpage>1</fpage><lpage>6</lpage><pub-id pub-id-type="doi">10.1016/j.conb.2014.05.004</pub-id><pub-id pub-id-type="pmid">25079053</pub-id></element-citation></ref><ref id="bib24"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Girardeau</surname><given-names>G</given-names></name><name><surname>Benchenane</surname><given-names>K</given-names></name><name><surname>Wiener</surname><given-names>SI</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name><name><surname>Zugaro</surname><given-names>MB</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Selective suppression of hippocampal ripples impairs spatial memory</article-title><source>Nature Neuroscience</source><volume>12</volume><fpage>1222</fpage><lpage>1223</lpage><pub-id pub-id-type="doi">10.1038/nn.2384</pub-id><pub-id pub-id-type="pmid">19749750</pub-id></element-citation></ref><ref id="bib25"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gridchyn</surname><given-names>I</given-names></name><name><surname>Schoenenberger</surname><given-names>P</given-names></name><name><surname>O&#8217;Neill</surname><given-names>J</given-names></name><name><surname>Csicsvari</surname><given-names>J</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Assembly-specific disruption of hippocampal replay leads to selective memory deficit</article-title><source>Neuron</source><volume>106</volume><fpage>291</fpage><lpage>300</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2020.01.021</pub-id><pub-id pub-id-type="pmid">32070475</pub-id></element-citation></ref><ref id="bib26"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Grosmark</surname><given-names>AD</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Diversity in neural firing dynamics supports both rigid and learned hippocampal 
sequences</article-title><source>Science</source><volume>351</volume><fpage>1440</fpage><lpage>1443</lpage><pub-id pub-id-type="doi">10.1126/science.aad1935</pub-id><pub-id pub-id-type="pmid">27013730</pub-id></element-citation></ref><ref id="bib27"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hagen</surname><given-names>E</given-names></name><name><surname>Chambers</surname><given-names>AR</given-names></name><name><surname>Einevoll</surname><given-names>GT</given-names></name><name><surname>Pettersen</surname><given-names>KH</given-names></name><name><surname>Enger</surname><given-names>R</given-names></name><name><surname>Stasik</surname><given-names>AJ</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>RippleNet: a recurrent neural network for sharp wave ripple (SPW-R) detection</article-title><source>Neuroinformatics</source><volume>19</volume><fpage>493</fpage><lpage>514</lpage><pub-id pub-id-type="doi">10.1007/s12021-020-09496-2</pub-id><pub-id pub-id-type="pmid">33394388</pub-id></element-citation></ref><ref id="bib28"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Ioffe</surname><given-names>S</given-names></name><name><surname>Szegedy</surname><given-names>C.</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Batch normalization: Accelerating deep network training by reducing internal covariate shift</article-title><conf-name>32nd International Conference on Machine Learning, ICML 2015. 
International Machine Learning Society (IMLS)</conf-name><fpage>448</fpage><lpage>456</lpage></element-citation></ref><ref id="bib29"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jadhav</surname><given-names>SP</given-names></name><name><surname>Kemere</surname><given-names>C</given-names></name><name><surname>German</surname><given-names>PW</given-names></name><name><surname>Frank</surname><given-names>LM</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Awake hippocampal sharp-wave ripples support spatial memory</article-title><source>Science</source><volume>336</volume><fpage>1454</fpage><lpage>1458</lpage><pub-id pub-id-type="doi">10.1126/science.1217230</pub-id><pub-id pub-id-type="pmid">22555434</pub-id></element-citation></ref><ref id="bib30"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Joo</surname><given-names>HR</given-names></name><name><surname>Frank</surname><given-names>LM</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>The hippocampal sharp wave-ripple in memory retrieval for immediate use and consolidation</article-title><source>Nature Reviews. 
Neuroscience</source><volume>19</volume><fpage>744</fpage><lpage>757</lpage><pub-id pub-id-type="doi">10.1038/s41583-018-0077-1</pub-id><pub-id pub-id-type="pmid">30356103</pub-id></element-citation></ref><ref id="bib31"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jun</surname><given-names>JJ</given-names></name><name><surname>Steinmetz</surname><given-names>NA</given-names></name><name><surname>Siegle</surname><given-names>JH</given-names></name><name><surname>Denman</surname><given-names>DJ</given-names></name><name><surname>Bauza</surname><given-names>M</given-names></name><name><surname>Barbarits</surname><given-names>B</given-names></name><name><surname>Lee</surname><given-names>AK</given-names></name><name><surname>Anastassiou</surname><given-names>CA</given-names></name><name><surname>Andrei</surname><given-names>A</given-names></name><name><surname>Ayd&#305;n</surname><given-names>&#199;</given-names></name><name><surname>Barbic</surname><given-names>M</given-names></name><name><surname>Blanche</surname><given-names>TJ</given-names></name><name><surname>Bonin</surname><given-names>V</given-names></name><name><surname>Couto</surname><given-names>J</given-names></name><name><surname>Dutta</surname><given-names>B</given-names></name><name><surname>Gratiy</surname><given-names>SL</given-names></name><name><surname>Gutnisky</surname><given-names>DA</given-names></name><name><surname>H&#228;usser</surname><given-names>M</given-names></name><name><surname>Karsh</surname><given-names>B</given-names></name><name><surname>Ledochowitsch</surname><given-names>P</given-names></name><name><surname>Lopez</surname><given-names>CM</given-names></name><name><surname>Mitelut</surname><given-names>C</given-names></name><name><surname>Musa</surname><given-names>S</given-names></name><name><surname>Okun</surname><given-names>M</given-names></name><name><surname>Pachitariu</surname><given-names>M</given-names></name><name><s
urname>Putzeys</surname><given-names>J</given-names></name><name><surname>Rich</surname><given-names>PD</given-names></name><name><surname>Rossant</surname><given-names>C</given-names></name><name><surname>Sun</surname><given-names>W-L</given-names></name><name><surname>Svoboda</surname><given-names>K</given-names></name><name><surname>Carandini</surname><given-names>M</given-names></name><name><surname>Harris</surname><given-names>KD</given-names></name><name><surname>Koch</surname><given-names>C</given-names></name><name><surname>O&#8217;Keefe</surname><given-names>J</given-names></name><name><surname>Harris</surname><given-names>TD</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Fully integrated silicon probes for high-density recording of neural activity</article-title><source>Nature</source><volume>551</volume><fpage>232</fpage><lpage>236</lpage><pub-id pub-id-type="doi">10.1038/nature24636</pub-id><pub-id pub-id-type="pmid">29120427</pub-id></element-citation></ref><ref id="bib32"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kamondi</surname><given-names>A</given-names></name><name><surname>Acs&#225;dy</surname><given-names>L</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="1998">1998</year><article-title>Dendritic spikes are enhanced by cooperative network activity in the intact hippocampus</article-title><source>The Journal of Neuroscience</source><volume>18</volume><fpage>3919</fpage><lpage>3928</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.18-10-03919.1998</pub-id><pub-id pub-id-type="pmid">9570819</pub-id></element-citation></ref><ref id="bib33"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Karimi 
Abadchi</surname><given-names>J</given-names></name><name><surname>Nazari-Ahangarkolaee</surname><given-names>M</given-names></name><name><surname>Gattas</surname><given-names>S</given-names></name><name><surname>Bermudez-Contreras</surname><given-names>E</given-names></name><name><surname>Luczak</surname><given-names>A</given-names></name><name><surname>McNaughton</surname><given-names>BL</given-names></name><name><surname>Mohajerani</surname><given-names>MH</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Spatiotemporal patterns of neocortical activity around hippocampal sharp-wave ripples</article-title><source>eLife</source><volume>9</volume><elocation-id>e51972</elocation-id><pub-id pub-id-type="doi">10.7554/eLife.51972</pub-id><pub-id pub-id-type="pmid">32167467</pub-id></element-citation></ref><ref id="bib34"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Kingma</surname><given-names>DP</given-names></name><name><surname>Ba</surname><given-names>JL</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Adam: A method for stochastic optimization</article-title><conf-name>International Conference on Learning Representations, ICLR 2015 - Conference Track Proceedings. 
International Conference on Learning Representations, ICLR</conf-name></element-citation></ref><ref id="bib35"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kitamura</surname><given-names>T</given-names></name><name><surname>Macdonald</surname><given-names>CJ</given-names></name><name><surname>Tonegawa</surname><given-names>S</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Entorhinal-hippocampal neuronal circuits bridge temporally discontiguous events</article-title><source>Learning &amp; Memory</source><volume>22</volume><fpage>438</fpage><lpage>443</lpage><pub-id pub-id-type="doi">10.1101/lm.038687.115</pub-id><pub-id pub-id-type="pmid">26286654</pub-id></element-citation></ref><ref id="bib36"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Klausberger</surname><given-names>T</given-names></name><name><surname>Marton</surname><given-names>LF</given-names></name><name><surname>O&#8217;Neill</surname><given-names>J</given-names></name><name><surname>Huck</surname><given-names>JHJ</given-names></name><name><surname>Dalezios</surname><given-names>Y</given-names></name><name><surname>Fuentealba</surname><given-names>P</given-names></name><name><surname>Suen</surname><given-names>WY</given-names></name><name><surname>Papp</surname><given-names>E</given-names></name><name><surname>Kaneko</surname><given-names>T</given-names></name><name><surname>Watanabe</surname><given-names>M</given-names></name><name><surname>Csicsvari</surname><given-names>J</given-names></name><name><surname>Somogyi</surname><given-names>P</given-names></name></person-group><year iso-8601-date="2005">2005</year><article-title>Complementary roles of cholecystokinin- and parvalbumin-expressing gabaergic neurons in hippocampal network oscillations</article-title><source>The Journal of 
Neuroscience</source><volume>25</volume><fpage>9782</fpage><lpage>9793</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.3269-05.2005</pub-id><pub-id pub-id-type="pmid">16237182</pub-id></element-citation></ref><ref id="bib37"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Klausberger</surname><given-names>T</given-names></name><name><surname>Somogyi</surname><given-names>P</given-names></name></person-group><year iso-8601-date="2008">2008</year><article-title>Neuronal diversity and temporal dynamics: the unity of hippocampal circuit operations</article-title><source>Science</source><volume>321</volume><fpage>53</fpage><lpage>57</lpage><pub-id pub-id-type="doi">10.1126/science.1149381</pub-id><pub-id pub-id-type="pmid">18599766</pub-id></element-citation></ref><ref id="bib38"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Maas</surname><given-names>AL</given-names></name><name><surname>Hannun</surname><given-names>AY</given-names></name><name><surname>Ng</surname><given-names>AY.</given-names></name></person-group><year iso-8601-date="2013">2013</year><source>Rectifier nonlinearities improve neural network acoustic models</source><publisher-name>ICML Work Deep Learn AUDIO, SPEECH Lang Process</publisher-name></element-citation></ref><ref id="bib39"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Mahendran</surname><given-names>A</given-names></name><name><surname>Vedaldi</surname><given-names>A</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Understanding Deep Image Representations by Inverting Them</article-title><conf-name>Proceedings. 
IEEE Computer Society Conference on Computer Vision and Pattern Recognition</conf-name><fpage>5188</fpage><lpage>5196</lpage><pub-id pub-id-type="doi">10.48550/arXiv.1412.0035</pub-id></element-citation></ref><ref id="bib40"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>McInnes</surname><given-names>L</given-names></name><name><surname>Healy</surname><given-names>J</given-names></name><name><surname>Saul</surname><given-names>N</given-names></name><name><surname>Gro&#223;berger</surname><given-names>L</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>UMAP: uniform manifold approximation and projection</article-title><source>Journal of Open Source Software</source><volume>3</volume><elocation-id>861</elocation-id><pub-id pub-id-type="doi">10.21105/joss.00861</pub-id></element-citation></ref><ref id="bib41"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mizuseki</surname><given-names>K</given-names></name><name><surname>Diba</surname><given-names>K</given-names></name><name><surname>Pastalkova</surname><given-names>E</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2011">2011</year><article-title>Hippocampal CA1 pyramidal cells form functionally distinct sublayers</article-title><source>Nature Neuroscience</source><volume>14</volume><fpage>1174</fpage><lpage>1181</lpage><pub-id pub-id-type="doi">10.1038/nn.2894</pub-id><pub-id pub-id-type="pmid">21822270</pub-id></element-citation></ref><ref id="bib42"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Modi</surname><given-names>ME</given-names></name><name><surname>Sahin</surname><given-names>M</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Translational use of event-related potentials to assess circuit 
integrity in ASD</article-title><source>Nature Reviews. Neurology</source><volume>13</volume><fpage>160</fpage><lpage>170</lpage><pub-id pub-id-type="doi">10.1038/nrneurol.2017.15</pub-id><pub-id pub-id-type="pmid">28211449</pub-id></element-citation></ref><ref id="bib43"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nakashiba</surname><given-names>T</given-names></name><name><surname>Buhl</surname><given-names>DL</given-names></name><name><surname>McHugh</surname><given-names>TJ</given-names></name><name><surname>Tonegawa</surname><given-names>S</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Hippocampal CA3 output is crucial for ripple-associated reactivation and consolidation of memory</article-title><source>Neuron</source><volume>62</volume><fpage>781</fpage><lpage>787</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2009.05.013</pub-id><pub-id pub-id-type="pmid">19555647</pub-id></element-citation></ref><ref id="bib44"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Navas-Olive</surname><given-names>A</given-names></name><name><surname>Valero</surname><given-names>M</given-names></name><name><surname>Jurado-Parras</surname><given-names>T</given-names></name><name><surname>de Salas-Quiroga</surname><given-names>A</given-names></name><name><surname>Averkin</surname><given-names>RG</given-names></name><name><surname>Gambino</surname><given-names>G</given-names></name><name><surname>Cid</surname><given-names>E</given-names></name><name><surname>de la Prida</surname><given-names>LM</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Multimodal determinants of phase-locked dynamics across deep-superficial hippocampal sublayers during theta oscillations</article-title><source>Nature Communications</source><volume>11</volume><elocation-id>2217</elocation-id><pub-id 
pub-id-type="doi">10.1038/s41467-020-15840-6</pub-id><pub-id pub-id-type="pmid">32371879</pub-id></element-citation></ref><ref id="bib45"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Navas-Olive</surname><given-names>V</given-names></name><name><surname>Esparza</surname><given-names>J</given-names></name></person-group><year iso-8601-date="2022">2022</year><data-title>Cnn-matlab</data-title><version designator="swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33">swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33</version><source>Software Heritage</source><ext-link ext-link-type="uri" xlink:href="https://archive.softwareheritage.org/swh:1:dir:0e369cb7c13f28d016e1b55a1f3e0242bc91ec91;origin=https://github.com/PridaLab/cnn-matlab;visit=swh:1:snp:0eef36d3eec9e00833377db989809596bda847ac;anchor=swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33">https://archive.softwareheritage.org/swh:1:dir:0e369cb7c13f28d016e1b55a1f3e0242bc91ec91;origin=https://github.com/PridaLab/cnn-matlab;visit=swh:1:snp:0eef36d3eec9e00833377db989809596bda847ac;anchor=swh:1:rev:060b2ff6e4b6c5eacb9799addd5123ad06eaaf33</ext-link></element-citation></ref><ref id="bib46"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Niedermeyer</surname><given-names>E</given-names></name><name><surname>Silva</surname><given-names>F</given-names></name></person-group><year iso-8601-date="2005">2005</year><source>Electroencephalography: Basic Principles, Clinical Applications, And Related Fields</source><publisher-name>Wolters Kluwer</publisher-name></element-citation></ref><ref id="bib47"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Olafsd&#243;ttir</surname><given-names>HF</given-names></name><name><surname>Bush</surname><given-names>D</given-names></name><name><surname>Barry</surname><given-names>C</given-names></name></person-group><year 
iso-8601-date="2018">2018</year><article-title>The role of hippocampal replay in memory and planning</article-title><source>Current Biology</source><volume>28</volume><fpage>R37</fpage><lpage>R50</lpage><pub-id pub-id-type="doi">10.1016/j.cub.2017.10.073</pub-id><pub-id pub-id-type="pmid">29316421</pub-id></element-citation></ref><ref id="bib48"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oliva</surname><given-names>A</given-names></name><name><surname>Fern&#225;ndez-Ruiz</surname><given-names>A</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name><name><surname>Ber&#233;nyi</surname><given-names>A</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Role of hippocampal CA2 region in triggering sharp-wave ripples</article-title><source>Neuron</source><volume>91</volume><fpage>1342</fpage><lpage>1355</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2016.08.008</pub-id><pub-id pub-id-type="pmid">27593179</pub-id></element-citation></ref><ref id="bib49"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oliva</surname><given-names>A</given-names></name><name><surname>Fern&#225;ndez-Ruiz</surname><given-names>A</given-names></name><name><surname>Oliveira</surname><given-names>E</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Origin of gamma frequency power during hippocampal sharp-wave ripples</article-title><source>Cell Reports</source><volume>25</volume><fpage>1693</fpage><lpage>1700</lpage><pub-id pub-id-type="doi">10.1016/j.celrep.2018.10.066</pub-id></element-citation></ref><ref id="bib50"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Oliva</surname><given-names>A</given-names></name><name><surname>Fern&#225;ndez-Ruiz</surname><given-names>A</given-names></name><name><surname>Leroy</surname><given-names>F</given-names></name><name><surname>Siegelbaum</surname><given-names>SA</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Hippocampal CA2 sharp-wave ripples reactivate and promote social memory</article-title><source>Nature</source><volume>587</volume><fpage>264</fpage><lpage>269</lpage><pub-id pub-id-type="doi">10.1038/s41586-020-2758-y</pub-id><pub-id pub-id-type="pmid">32968277</pub-id></element-citation></ref><ref id="bib51"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Patel</surname><given-names>J</given-names></name><name><surname>Schomburg</surname><given-names>EW</given-names></name><name><surname>Ber&#233;nyi</surname><given-names>A</given-names></name><name><surname>Fujisawa</surname><given-names>S</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Local generation and propagation of ripples along the septotemporal axis of the hippocampus</article-title><source>The Journal of Neuroscience</source><volume>33</volume><fpage>17029</fpage><lpage>17041</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.2036-13.2013</pub-id><pub-id pub-id-type="pmid">24155307</pub-id></element-citation></ref><ref id="bib52"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Peters</surname><given-names>A</given-names></name></person-group><year iso-8601-date="2022">2022</year><data-title>AP_histology</data-title><source>GitHub</source><ext-link ext-link-type="uri" xlink:href="https://github.com/petersaj/AP_histology">https://github.com/petersaj/AP_histology</ext-link></element-citation></ref><ref 
id="bib53"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pfeiffer</surname><given-names>BE</given-names></name><name><surname>Foster</surname><given-names>DJ</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Place cells: autoassociative dynamics in the generation of sequences of hippocampal place cells</article-title><source>Science</source><volume>349</volume><fpage>180</fpage><lpage>183</lpage><pub-id pub-id-type="doi">10.1126/science.aaa9633</pub-id><pub-id pub-id-type="pmid">26160946</pub-id></element-citation></ref><ref id="bib54"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pfeiffer</surname><given-names>BE</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>The content of hippocampal &#8220;replay.&#8221;</article-title><source>Hippocampus</source><volume>30</volume><fpage>6</fpage><lpage>18</lpage><pub-id pub-id-type="doi">10.1002/hipo.22824</pub-id><pub-id pub-id-type="pmid">29266510</pub-id></element-citation></ref><ref id="bib55"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ramirez-Villegas</surname><given-names>JF</given-names></name><name><surname>Logothetis</surname><given-names>NK</given-names></name><name><surname>Besserve</surname><given-names>M</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Diversity of sharp-wave-ripple LFP signatures reveals differentiated brain-wide dynamical events</article-title><source>PNAS</source><volume>112</volume><fpage>E6379</fpage><lpage>E6387</lpage><pub-id pub-id-type="doi">10.1073/pnas.1518257112</pub-id><pub-id pub-id-type="pmid">26540729</pub-id></element-citation></ref><ref id="bib56"><element-citation publication-type="confproc"><person-group 
person-group-type="author"><name><surname>Redmon</surname><given-names>J</given-names></name><name><surname>Divvala</surname><given-names>S</given-names></name><name><surname>Girshick</surname><given-names>R</given-names></name><name><surname>Farhadi</surname><given-names>A</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>You Only Look Once: Unified, Real-Time Object Detection</article-title><conf-name>Proc IEEE Comput Soc Conf Comput Vis Pattern Recognit 2016-December</conf-name><fpage>779</fpage><lpage>788</lpage><pub-id pub-id-type="doi">10.48550/arXiv.1506.02640</pub-id></element-citation></ref><ref id="bib57"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Richards</surname><given-names>BA</given-names></name><name><surname>Lillicrap</surname><given-names>TP</given-names></name><name><surname>Beaudoin</surname><given-names>P</given-names></name><name><surname>Bengio</surname><given-names>Y</given-names></name><name><surname>Bogacz</surname><given-names>R</given-names></name><name><surname>Christensen</surname><given-names>A</given-names></name><name><surname>Clopath</surname><given-names>C</given-names></name><name><surname>Costa</surname><given-names>RP</given-names></name><name><surname>de 
Berker</surname><given-names>A</given-names></name><name><surname>Ganguli</surname><given-names>S</given-names></name><name><surname>Gillon</surname><given-names>CJ</given-names></name><name><surname>Hafner</surname><given-names>D</given-names></name><name><surname>Kepecs</surname><given-names>A</given-names></name><name><surname>Kriegeskorte</surname><given-names>N</given-names></name><name><surname>Latham</surname><given-names>P</given-names></name><name><surname>Lindsay</surname><given-names>GW</given-names></name><name><surname>Miller</surname><given-names>KD</given-names></name><name><surname>Naud</surname><given-names>R</given-names></name><name><surname>Pack</surname><given-names>CC</given-names></name><name><surname>Poirazi</surname><given-names>P</given-names></name><name><surname>Roelfsema</surname><given-names>P</given-names></name><name><surname>Sacramento</surname><given-names>J</given-names></name><name><surname>Saxe</surname><given-names>A</given-names></name><name><surname>Scellier</surname><given-names>B</given-names></name><name><surname>Schapiro</surname><given-names>AC</given-names></name><name><surname>Senn</surname><given-names>W</given-names></name><name><surname>Wayne</surname><given-names>G</given-names></name><name><surname>Yamins</surname><given-names>D</given-names></name><name><surname>Zenke</surname><given-names>F</given-names></name><name><surname>Zylberberg</surname><given-names>J</given-names></name><name><surname>Therien</surname><given-names>D</given-names></name><name><surname>Kording</surname><given-names>KP</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>A deep learning framework for neuroscience</article-title><source>Nature Neuroscience</source><volume>22</volume><fpage>1761</fpage><lpage>1770</lpage><pub-id pub-id-type="doi">10.1038/s41593-019-0520-2</pub-id><pub-id pub-id-type="pmid">31659335</pub-id></element-citation></ref><ref id="bib58"><element-citation 
publication-type="journal"><person-group person-group-type="author"><name><surname>Rosenblatt</surname><given-names>F</given-names></name></person-group><year iso-8601-date="1958">1958</year><article-title>The perceptron: A probabilistic model for information storage and organization in the brain</article-title><source>Psychological Review</source><volume>65</volume><fpage>386</fpage><lpage>408</lpage><pub-id pub-id-type="doi">10.1037/h0042519</pub-id><pub-id pub-id-type="pmid">13602029</pub-id></element-citation></ref><ref id="bib59"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Roumis</surname><given-names>DK</given-names></name><name><surname>Frank</surname><given-names>LM</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Hippocampal sharp-wave ripples in waking and sleeping states</article-title><source>Current Opinion in Neurobiology</source><volume>35</volume><fpage>6</fpage><lpage>12</lpage><pub-id pub-id-type="doi">10.1016/j.conb.2015.05.001</pub-id><pub-id pub-id-type="pmid">26011627</pub-id></element-citation></ref><ref id="bib60"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Shamash</surname><given-names>P</given-names></name><name><surname>Carandini</surname><given-names>M</given-names></name><name><surname>Harris</surname><given-names>KD</given-names></name><name><surname>Steinmetz</surname><given-names>NA</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>A Tool for Analyzing Electrode Tracks from Slice Histology</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/447995</pub-id></element-citation></ref><ref id="bib61"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Siegle</surname><given-names>JH</given-names></name><name><surname>L&#243;pez</surname><given-names>AC</given-names></name><name><surname>Patel</surname><given-names>YA</given-names></name><name><surname>Abramov</surname><given-names>K</given-names></name><name><surname>Ohayon</surname><given-names>S</given-names></name><name><surname>Voigts</surname><given-names>J</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Open ephys: an open-source, plugin-based platform for multichannel electrophysiology</article-title><source>Journal of Neural Engineering</source><volume>14</volume><elocation-id>045003</elocation-id><pub-id pub-id-type="doi">10.1088/1741-2552/aa5eea</pub-id><pub-id pub-id-type="pmid">28169219</pub-id></element-citation></ref><ref id="bib62"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Simonyan</surname><given-names>K</given-names></name><name><surname>Vedaldi</surname><given-names>A</given-names></name><name><surname>Zisserman</surname><given-names>A</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps</article-title><conf-name>2nd Int Conf Learn Represent ICLR 2014 - Work Track Proc</conf-name></element-citation></ref><ref id="bib63"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sosa</surname><given-names>M</given-names></name><name><surname>Joo</surname><given-names>HR</given-names></name><name><surname>Frank</surname><given-names>LM</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Dorsal and ventral hippocampal sharp-wave ripples activate distinct nucleus accumbens networks</article-title><source>Neuron</source><volume>105</volume><fpage>725</fpage><lpage>741</lpage><pub-id 
pub-id-type="doi">10.1016/j.neuron.2019.11.022</pub-id><pub-id pub-id-type="pmid">31864947</pub-id></element-citation></ref><ref id="bib64"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Springenberg</surname><given-names>JT</given-names></name><name><surname>Dosovitskiy</surname><given-names>A</given-names></name><name><surname>Brox</surname><given-names>T</given-names></name><name><surname>Riedmiller</surname><given-names>M</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Striving for Simplicity: The All Convolutional Net</article-title><conf-name>Int Conf Learn Represent ICLR 2015 - Work Track Proc</conf-name></element-citation></ref><ref id="bib65"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stark</surname><given-names>E</given-names></name><name><surname>Roux</surname><given-names>L</given-names></name><name><surname>Eichler</surname><given-names>R</given-names></name><name><surname>Senzai</surname><given-names>Y</given-names></name><name><surname>Royer</surname><given-names>S</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Pyramidal cell-interneuron interactions underlie hippocampal ripple oscillations</article-title><source>Neuron</source><volume>83</volume><fpage>467</fpage><lpage>480</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2014.06.023</pub-id><pub-id pub-id-type="pmid">25033186</pub-id></element-citation></ref><ref id="bib66"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Stark</surname><given-names>E</given-names></name><name><surname>Roux</surname><given-names>L</given-names></name><name><surname>Eichler</surname><given-names>R</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Local generation of multineuronal spike sequences in the hippocampal CA1 region</article-title><source>PNAS</source><volume>112</volume><fpage>10521</fpage><lpage>10526</lpage><pub-id pub-id-type="doi">10.1073/pnas.1508785112</pub-id><pub-id pub-id-type="pmid">26240336</pub-id></element-citation></ref><ref id="bib67"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sullivan</surname><given-names>D</given-names></name><name><surname>Csicsvari</surname><given-names>J</given-names></name><name><surname>Mizuseki</surname><given-names>K</given-names></name><name><surname>Montgomery</surname><given-names>S</given-names></name><name><surname>Diba</surname><given-names>K</given-names></name><name><surname>Buzs&#225;ki</surname><given-names>G</given-names></name></person-group><year iso-8601-date="2011">2011</year><article-title>Relationships between hippocampal sharp waves, ripples, and fast gamma oscillation: influence of dentate and entorhinal cortical activity</article-title><source>The Journal of Neuroscience</source><volume>31</volume><fpage>8605</fpage><lpage>8616</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.0294-11.2011</pub-id><pub-id pub-id-type="pmid">21653864</pub-id></element-citation></ref><ref id="bib68"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Tikhonov</surname><given-names>AN</given-names></name><name><surname>Arsenin</surname><given-names>VY.</given-names></name></person-group><year iso-8601-date="1977">1977</year><source>Solutions of Ill-Posed Problems</source><publisher-loc>New 
York</publisher-loc><publisher-name>Winston</publisher-name></element-citation></ref><ref id="bib69"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Valero</surname><given-names>M</given-names></name><name><surname>Cid</surname><given-names>E</given-names></name><name><surname>Averkin</surname><given-names>RG</given-names></name><name><surname>Aguilar</surname><given-names>J</given-names></name><name><surname>Sanchez-Aguilera</surname><given-names>A</given-names></name><name><surname>Viney</surname><given-names>TJ</given-names></name><name><surname>Gomez-Dominguez</surname><given-names>D</given-names></name><name><surname>Bellistri</surname><given-names>E</given-names></name><name><surname>de la Prida</surname><given-names>LM</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Determinants of different deep and superficial CA1 pyramidal cell dynamics during sharp-wave ripples</article-title><source>Nature Neuroscience</source><volume>18</volume><fpage>1281</fpage><lpage>1290</lpage><pub-id pub-id-type="doi">10.1038/nn.4074</pub-id><pub-id pub-id-type="pmid">26214372</pub-id></element-citation></ref><ref id="bib70"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>van de Ven</surname><given-names>GM</given-names></name><name><surname>Trouche</surname><given-names>S</given-names></name><name><surname>McNamara</surname><given-names>CG</given-names></name><name><surname>Allen</surname><given-names>K</given-names></name><name><surname>Dupret</surname><given-names>D</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Hippocampal offline reactivation consolidates recently formed cell assembly patterns during sharp wave-ripples</article-title><source>Neuron</source><volume>92</volume><fpage>968</fpage><lpage>974</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2016.10.020</pub-id><pub-id 
pub-id-type="pmid">27840002</pub-id></element-citation></ref><ref id="bib71"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yamamoto</surname><given-names>J</given-names></name><name><surname>Tonegawa</surname><given-names>S</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Direct medial entorhinal cortex input to hippocampal CA1 is crucial for extended quiet awake replay</article-title><source>Neuron</source><volume>96</volume><fpage>217</fpage><lpage>227</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2017.09.017</pub-id><pub-id pub-id-type="pmid">28957670</pub-id></element-citation></ref></ref-list></back><sub-article article-type="editor-report" id="sa0"><front-stub><article-id pub-id-type="doi">10.7554/eLife.77772.sa0</article-id><title-group><article-title>Editor's evaluation</article-title></title-group><contrib-group><contrib contrib-type="author"><name><surname>Peyrache</surname><given-names>Adrien</given-names></name><role specific-use="editor">Reviewing Editor</role><aff><institution-wrap><institution-id institution-id-type="ror">https://ror.org/01pxwe438</institution-id><institution>McGill University</institution></institution-wrap><country>Canada</country></aff></contrib></contrib-group><related-object id="sa0ro1" object-id-type="id" object-id="10.1101/2022.03.11.483905" link-type="continued-by" xlink:href="https://sciety.org/articles/activity/10.1101/2022.03.11.483905"/></front-stub><body><p>This paper will be of interest to the neuroscience community studying brain oscillations. It presents a new method to detect sharp wave-ripples in the hippocampus with deep learning techniques, instead of the more traditional signal processing approach. 
The overall detection performance improves and this technique may help in identifying and characterizing previously undetected physiological events.</p></body></sub-article><sub-article article-type="decision-letter" id="sa1"><front-stub><article-id pub-id-type="doi">10.7554/eLife.77772.sa1</article-id><title-group><article-title>Decision letter</article-title></title-group><contrib-group content-type="section"><contrib contrib-type="editor"><name><surname>Peyrache</surname><given-names>Adrien</given-names></name><role>Reviewing Editor</role><aff><institution-wrap><institution-id institution-id-type="ror">https://ror.org/01pxwe438</institution-id><institution>McGill University</institution></institution-wrap><country>Canada</country></aff></contrib></contrib-group></front-stub><body><boxed-text id="sa2-box1"><p>Our editorial process produces two outputs: (i) <ext-link ext-link-type="uri" xlink:href="https://sciety.org/articles/activity/10.1101/2022.03.11.483905">public reviews</ext-link> designed to be posted alongside <ext-link ext-link-type="uri" xlink:href="https://www.biorxiv.org/content/10.1101/2022.03.11.483905v1">the preprint</ext-link> for the benefit of readers; (ii) feedback on the manuscript for the authors, including requests for revisions, shown below. We also include an acceptance summary that explains what the editors found interesting or important about the work.</p></boxed-text><p><bold>Decision letter after peer review:</bold></p><p>Thank you for submitting your article "Deep learning based feature extraction for prediction and interpretation of sharp-wave ripples" for consideration by <italic>eLife</italic>. Your article has been reviewed by 2 peer reviewers, and the evaluation has been overseen by a Reviewing Editor and John Huguenard as the Senior Editor. 
The reviewers have opted to remain anonymous.</p><p>The reviewers have discussed their reviews with one another, and the Reviewing Editor has drafted this to help you prepare a revised submission.</p><p>In the present study, Navas-Olive et al., introduce a novel method to detect and characterize sharp-wave ripples (SWRs) in the hippocampus. Specifically, the study presents how a convolutional neural net (CNN) may achieve better performance than more traditional signal processing techniques. While each reviewer has raised a number of specific concerns about the present study, there was an agreement that the following essential revisions needed to be addressed to warrant publication of the manuscript.</p><p>1. The study compares the performance of SWR detection with a CNN and with more classic signal processing methods (i.e. filters). However, several aspects of this comparison are unclear, if not misleading. First, while the CNN has obviously been optimized to detect SWRs, the study should clarify how the filter parameters were chosen. The study should demonstrate that fine-tuned filters still underperform the CNNS. Moreover, the study should present a fair comparison of the performance of each method, based on the same ground truth data, using the same number of channels, etc.</p><p>2. It is unclear what types of events are detected by the CNN and missed by the traditional approach, how can one be sure these are more physiologically relevant. Isn't it possible that some of these events result for example from an increase in spiking without clear underlying SWR? The study should provide more information regarding the nature of the events that are detected by the CNN and not the traditional approach.</p><p>3. The study should also convincingly demonstrate that the CNN can be applied to a new dataset and still outperform the spectral approach without (or at least little) re-training. This is key to validating the method.</p><p>4. 
The higher performance of the CNN should be demonstrated with, for example, manual scoring of false positive/negative rates. In summary, there should be no d.ubt that the CNN outperforms the spectral approach across conditions and datasets.</p><p>Please see the reviewers' comments below for more details.</p><p><italic>Reviewer #1 (Recommendations for the authors):</italic></p><p>&#8211; Several times throughout the paper the authors claim that "spectral methods fail to capture the mechanistic complexity of SWRs". I understand that they want to make the point that spectral methods are based mainly on the spectrum, and therefore constrain the variability of the events to be detected. However, this sentence is misleading as spectral methods could also be used to detect variability of events if properly tuned, even in separate runs throughout one dataset if that is what is wanted. This is a rather important point as it can be misinterpreted by the fact that those methods cannot be used to detect a variety of SWRs, which is not true if they are used properly.</p><p>&#8211; Several times the authors also claim that they are able to detect "physiologically relevant processes" with their new method, but as it is, the manuscript just shows that they can detect new events, and remain to be shown whether they are "physiologically relevant".</p><p>&#8211; In "Figure S1. Network definition and parameters" do the authors mean "Figure 1"? In Figure 1 the authors show how they tune the parameters that work best for their CNN method and from there they compare it with a filter method. Presumably, the filter method has also been passed through a parameter tuning process to be used to its best performance but this is not stated and not shown anywhere in the paper. If "relatively arbitrary" parameters are used, then this could be the explanation of why the performance of the filter method is worst compared to CNN.</p><p>&#8211; In Figure 2. 
What do the authors mean by absolute ground truth? I could not find a clear explanation in the text and it seems to me that the authors refer to "absolute ground truth" as the events detected by CNN? If this is the case I am not sure is the best approach to use this as a fair comparison of "absolute ground truth". Similarly, I don't think is the best approach to use the "mean reference of performance" the score of a second research (nGT) of that of the previous researcher (oGT) as the second score will inherit the fails of the first score. Instead, maybe compare the two of these metrics separately or take the average of the two of them?</p><p>&#8211; The authors should show at least one manual score of the performance of their CNN method detection, showing examples of what they might consider false positives and missed scores. In figure 2D they did it for an external dataset and they re-scored it in order to "correct" the original ground truth. They show a "false positive" that they corrected but as I understand it, if that event was not part of the ground truth is a "missed" or "false negative" event instead of a "false positive" right?</p><p>&#8211; In Figure 2E the authors show the differences between CNN with different precision and the filter method, while the performance is better the trends are extremely similar and the numbers are very close for all comparisons (except for the recall where the filter clearly performs worse than CNN) and the significance might be an effect of sample size.</p><p>&#8211; The authors claim that "some predictions not consistent with the current definition of SWR may identify different forms of population firing and oscillatory activities associated with sharp-wave", while this is true, it is the fact that by the nature of the LFP and spiking activity, typical noise of the network at low (LFP) and high (spikes) frequencies could be capture in the CNN and misinterpreted as a "relevant event".</p><p>&#8211; In Figure 5 the authors 
claim that they find "striking differences in firing rates and timings of SWRs detected at SO, SR and SLM", however, from the example plots in Figure 5H it is clear that except SO, all other strata follow a similar timing, with bot SO and to some extent SLM showing some misalignment in time. How confident are the authors about this variability which turn out to be significant in Figure 5H is not related to the fact that at the two sides of the dipole of the pyramidal cell layer (SO and SLM) more noise can be detected due to larger events fluctuations that not necessarily are ripple events? In other words, the events detected at SO and SLM could contain a higher percentage of false positives? Alternatively, could the variability be related to the occurrence (and detection) of similar events in neighboring spectral bands (i.e., &#947; events)? The authors should discuss this point in the text.</p><p>Overall, I think the method is interesting and could be very useful to detect more nuance within hippocampal LFPs and offer new insights into the underlying mechanisms of hippocampal firing and how they organize in various forms of network events related to memory. Nonetheless, I suggest clarifying the above points for better interpretability as it will also clarify the context of how the method is being validated and where it could be applied in the future.</p><p><italic>Reviewer #2 (Recommendations for the authors):</italic></p><p>The key points that are required to convincingly support the claims of the paper are:</p><p>1. Comparing the CNN to a filter approach that has access to the same number of channels and can also learn the relative weight of each.</p><p>2. Showing that the CNN significantly outperforms such a model in detecting SWRs.</p><p>3. Convincingly demonstrating that the model can be applied to new datasets with good performance and reasonable overhead.</p><p>4. 
Showing that the CNN can identify real, biologically relevant aspects of SWRs that a filter cannot.</p></body></sub-article><sub-article article-type="reply" id="sa2"><front-stub><article-id pub-id-type="doi">10.7554/eLife.77772.sa2</article-id><title-group><article-title>Author response</article-title></title-group></front-stub><body><disp-quote content-type="editor-comment"><p>1. The study compares the performance of SWR detection with a CNN and with more classic signal processing methods (i.e. filters). However, several aspects of this comparison are unclear, if not misleading. First, while the CNN has obviously been optimized to detect SWRs, the study should clarify how the filter parameters were chosen. The study should demonstrate that fine-tuned filters still underperform the CNNS.</p></disp-quote><p>See below the results of the parameter study for the filter in the very same sessions used for training the CNN. The parameters chosen (100-300Hz band, order 2) provided maximal performance in the test set. Therefore, both methods are similarly optimized along training. This is now included (page 4):</p><p>&#8220;In order to compare CNN performance against spectral methods, we implemented a Butterworth filter, which parameters were optimized using the same training set (Figure 1&#8212;figure supplement 1D).&#8221;</p><disp-quote content-type="editor-comment"><p>Moreover, the study should present a fair comparison of the performance of each method, based on the same ground truth data, using the same number of channels, etc.</p></disp-quote><p>Please, note that the same ground truth data was used to optimize, to test and to validate performance of both models. 
Regarding using the same number of channels for the offline filter detection, please see <xref ref-type="fig" rid="sa2fig1">Author response image 1</xref>, a comparison of performance using different combinations of channels, from the standard detection at the SP layer (pyr) up to 4 and 8 channels (consensus detection). The filter performance is consistent across configurations and do not improve as more channels are added (actually there is a trend to decrease). We made a note in Figure 1-supp-1D, caption:</p><p>&#8220;Evaluation of the parameters of the Butterworth filter exhibiting performance F1&gt;0.65 (green values), similar to the CNN. The chosen parameters (100-300 Hz bandwidth and order 2) are indicated by arrowheads. We found no effect of the number of channels used for the filter (1, 4 and 8 channels), and chose that with the higher ripple power&#8221;</p><fig id="sa2fig1" position="float"><label>Author response image 1.</label><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/sa2-fig1.jpg"/></fig><disp-quote content-type="editor-comment"><p>2. It is unclear what types of events are detected by the CNN and missed by the traditional approach, how can one be sure these are more physiologically relevant.</p></disp-quote><p>We have visually validated all false positives (FP) detected by the CNN and the filter. As can be seen below the large majority of FP detected by the filter were artifacts (53.9% vs 27.7% for the CNN). The CNN detected more SW-no ripple (20.1% vs 7.8% for the filter) and events with population firing (14.9% vs 6.2%). Please, note that in many labs, detection of population firing synchrony is used as a proxy of replay events and ripples. For instance, this is exactly the issue with the Grosmark and Buzsaki 2016 ground truth, as we discuss below. We now include this analysis in the new Figure 4F. 
To facilitate the reader examining examples of True Positive and False Positive detections we also include a new figure (Figure 5), which comes with the executable code (see page 9)</p><disp-quote content-type="editor-comment"><p>Isn't it possible that some of these events result for example from an increase in spiking without clear underlying SWR? The study should provide more information regarding the nature of the events that are detected by the CNN and not the traditional approach.</p></disp-quote><p>To address this concern further, we estimated the power spectra of FP events detected by the CNN and missed by the filter and vice versa. We also considered TP events detected by both methods, as a reference. As can be seen in <xref ref-type="fig" rid="sa2fig2">Author response image 2</xref> below, for TP events detected by both methods the power spectrum displays two peaks corresponding to contributions by the sharp-wave (2-20 Hz) and the ripple (100-250 Hz) (see Oliva et al., Cell Reports 2018 for similar analysis). FP events detected exclusively by the CNN displayed a similar low frequency peak corresponding to the sharp-wave, and no dominant contribution in the high frequency band. In contrast, FP events detected exclusively by the filter more likely reflect events with higher frequency components. These FP events are among those categorized above.</p><fig id="sa2fig2" position="float"><label>Author response image 2.</label><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/sa2-fig2.jpg"/></fig><p>Finally, we compared the features of TP events detected by both methods. There is effect of threshold in both the frequency and power values of SWR detected by the filter (upper plots). In contrast, the CNN was less sensitive, as we already discussed (bottom plots). Moreover, SWR events detected by the CNN exhibited features similar to those of the ground truth (GT), while for the filter there were some significant differences. 
We include this analysis in the new Figure 2 of the revised version (page 5).</p><disp-quote content-type="editor-comment"><p>3. The study should also convincingly demonstrate that the CNN can be applied to a new dataset and still outperform the spectral approach without (or at least little) re-training. This is key to validating the method.</p></disp-quote><p>We would like to stress that we already provided this evidence in the previous version. Please note that the CNN was applied to several new datasets without re-training: (a) 15 sessions from 5 mice never used for training and test (Figure 2A); (b) new experimental sessions for online detection (Figure 2C); (c) an external dataset (Grossmark and Buzsaki 2016) (Figure 3E); (c) new data recorded with ultra-dense Neuropixels (Figure 7). We tried to make this even clearer in the text and figures (see schemes in Figure 3D; 7A) and reinforced the message in the abstract, introduction and Discussion sections. Therefore, the manuscript already addressed this point. We apologize if that was not clear enough.</p><p>In addition, we are currently applying the CNN without re-training to data from many other labs using different electrode configurations, including tetrodes, linear silicon probes and wires. To handle with different input channel configurations, we have developed an interpolation approach, which transform data into 8-channel inputs. Results confirm very good performance of the CNN without the need for retraining. Since we cannot disclose third-party data, we have looked for a new dataset from our own lab to illustrate the case. See <xref ref-type="fig" rid="sa2fig3">Author response image 3</xref>, results from 16ch silicon probes (100 &#956;m inter-electrode separation), where the CNN performed better than the filter (F1: p=0.0169; Precision, p=0.0110; 7 sessions, from 3 mice). We found that the performance of the CNN depends on the laminar LFP profile recorded per session, as Neuropixels data illustrate. 
This material will be incorporated in a subsequent paper aimed to test the CNN model online using different electrode configurations and experimental preparations.</p><fig id="sa2fig3" position="float"><label>Author response image 3.</label><graphic mimetype="image" mime-subtype="jpeg" xlink:href="elife-77772.xml.media/sa2-fig3.jpg"/></fig><p>Last, but not least, we provide the reader with open source codes, and access to annotated data to facilitate using and disseminating the tool. In particular:&#8211; Data is deposited in the Figshare repository https://figshare.com/projects/cnn-rippledata/117897.</p><p>&#8211; The trained model is accessible at the Github repository for both Python:</p><p>https://github.com/PridaLab/cnn-ripple, and Matlab: https://github.com/PridaLab/cnnmatlab</p><p>&#8211; Code visualization and detection is shown in an interactive notebook https://colab.research.google.com/github/PridaLab/cnnripple/blob/main/src/notebooks/cnn-example.ipynb.</p><p>&#8211; The online detection Open Ephys plugin is accessible at the Github repository: https://github.com/PridaLab/CNNRippleDetectorOEPlugin &#8211; An executable figure (Figure 5) is provided:</p><p>https://colab.research.google.com/github/PridaLab/cnn-ripple-executablefigure/blob/main/cnn-ripple-false-positive-examples.ipynb</p><disp-quote content-type="editor-comment"><p>4. The higher performance of the CNN should be demonstrated with, for example, manual scoring of false positive/negative rates. In summary, there should be no doubt that the CNN outperforms the spectral approach across conditions and datasets.</p></disp-quote><p>We have manually scored the FP of both the filter and the CNN, as shown before. The FN rate is 1-R, where R is recall, and it is very similar with a bit lower value for the CNN (CNN 0.34; filter 0.38). The FP rate is defined as FP/N, where N are all events in the ground truth that are not SWR. 
Since SWR are very erratic and most windows do not contain events, the resulting FP rate was very close to 0 for both methods. For this reason, we relied in estimating P (precision), R (recall) and F1, which are higher for the CNN (Figure 2A). We feel there is no doubt that the CNN outperformed the spectral filter across conditions and datasets.</p><disp-quote content-type="editor-comment"><p>Please see the reviewers' comments below for more details.</p></disp-quote><p>Please, see our responses to reviewers&#8217; comments</p><disp-quote content-type="editor-comment"><p>Reviewer #1 (Recommendations for the authors):</p><p>&#8211; In "Figure S1. Network definition and parameters" do the authors mean "Figure 1"? In Figure 1 the authors show how they tune the parameters that work best for their CNN method and from there they compare it with a filter method. Presumably, the filter method has also been passed through a parameter tuning process to be used to its best performance but this is not stated and not shown anywhere in the paper. If "relatively arbitrary" parameters are used, then this could be the explanation of why the performance of the filter method is worst compared to CNN.</p></disp-quote><p>This is now addressed in the new Figure 1-supp-1. The parameters chosen (100-300Hz band, order 2) provided maximal performance in the test set. Therefore, both methods are similarly optimized along training. This is now included (page 4)</p><disp-quote content-type="editor-comment"><p>&#8211; In Figure 2. What do the authors mean by absolute ground truth? I could not find a clear explanation in the text and it seems to me that the authors refer to "absolute ground truth" as the events detected by CNN? If this is the case I am not sure is the best approach to use this as a fair comparison of "absolute ground truth". 
Similarly, I don't think is the best approach to use the "mean reference of performance" the score of a second research (nGT) of that of the previous researcher (oGT) as the second score will inherit the fails of the first score. Instead, maybe compare the two of these metrics separately or take the average of the two of them?</p></disp-quote><p>We thank the reviewer for this comment. Just to clarify, by the absolute ground truth we meant the &#8216;whole truth&#8217; that includes all SWR (which is unknown). Since the experts&#8217; ground truths are similar at about 70%, there are non-tagged SWR missed in the individual ground truths. Thus, as we increase the number of experts we should converge on the absolute ground truth because they are adding more events to the pool. The analysis is about the effect of the experts&#8217; ground truth on performance. What we show is that when we consolidated the two GTs, meaning those events detected by different experts add together, the CNN improved performance but not the filter. This makes sense because this rescues some of those events that could be arguable whether they reflect SWR or not to the eyes of individual experts which are more typically missed by the filter (e.g. population synchrony with sharp-wave or sharp-wave no ripples). Please, note that the two experts annotated data independently, but the GT by the new expert is used for validation purposes only (no training).</p><p>To avoid misunderstanding we removed the reference to the absolute ground truth from Figure 3B, and clarified the issue:</p><p>&#8220;To evaluate the impact of these potential biases, we used the ground truth from a second expert in the lab for validation purposes only (3403 events, n=14 sessions, 7 mice).&#8221; (page 6) &#8220;In contrast, the filter failed to exhibit such an improvement, and performed worse when tested against the consolidated ground truth (one-way ANOVA for models, F(2)=0.02, p=0.033) (Figure 3B). &#8220;(page 7). 
&#8220;CNN performance improves when confronted with the consolidated ground truth, supporting that shared community tagging may help to advance our understanding of SWR definition&#8221; (page 8)</p><disp-quote content-type="editor-comment"><p>&#8211; The authors should show at least one manual score of the performance of their CNN method detection, showing examples of what they might consider false positives and missed scores. In figure 2D they did it for an external dataset and they re-scored it in order to "correct" the original ground truth. They show a "false positive" that they corrected but as I understand it, if that event was not part of the ground truth is a "missed" or "false negative" event instead of a "false positive" right?</p></disp-quote><p>We apologize for confusion. Note we manually scored all False Positive events from the training and validation dataset (17 sessions, from 7 mice). This is now shown for both the filter and the CNN in Figure 4F and examples are shown in the executable Figure 5. Regarding the new Figure 3D,E, we reevaluated events from the external data set. Please, note that we chose the Grosmark and Buzsaki 2016 dataset because SWR detection was conditioned on the coincidence of both population synchrony and LFP definition thus providing a &#8220;partial ground truth&#8221; (i.e. SWR without population firing were not annotated in the dataset). Thus, we revalidated False Positive detection. This is a perfect example of how the experimental goal (examining replay and thus relying in population firing plus LFP definitions) limits the ground truth. 
We have clarified the text.</p><p>&#8220;To evaluate this point further, and to test the capability of the CNN to generalize beyond its original training using head-fixed mice data, we used an externally annotated dataset of SWR recorded with high-density silicon probes from freely moving rats (Grosmark and Buzs&#225;ki, 2016) (Figure 3D; 2041 events; 5 sessions from 2 rats; Sup.Table.1). In that work, SWR detection was conditioned on the coincidence of both population synchrony and LFP definition, thus providing a &#8220;partial ground truth&#8221; (i.e. SWR without population firing were not annotated in the dataset).&#8221; See page 7.</p><disp-quote content-type="editor-comment"><p>&#8211; In Figure 2E the authors show the differences between CNN with different precision and the filter method, while the performance is better the trends are extremely similar and the numbers are very close for all comparisons (except for the recall where the filter clearly performs worse than CNN) and the significance might be an effect of sample size.</p></disp-quote><p>This refers again to the new Figure 3D,E of the external dataset. Following the advice we have added more samples to improve statistical testing (n=5 sessions from 2 rats). We now report significant differences. See Figure 3E.</p><disp-quote content-type="editor-comment"><p>&#8211; The authors claim that "some predictions not consistent with the current definition of SWR may identify different forms of population firing and oscillatory activities associated with sharp-wave", while this is true, it is the fact that by the nature of the LFP and spiking activity, typical noise of the network at low (LFP) and high (spikes) frequencies could be capture in the CNN and misinterpreted as a "relevant event".</p></disp-quote><p>As suggested, we have validated al FP predictions (new Figure 4F; Figure 5). 
We also evaluate the quantitative features of SWR events detected by the filter and the CNN, and compare them with the GT (Figure 2B). Finally, we discuss this point in the revised version. In particular:</p><p>&#8220;While we cannot discard noisy detection from a continuum of LFP activity, our categorization suggest they may reflect processes underlying buildup of population events (de la Prida et al., 2006). In addition, the ability of CA3 inputs to bring about &#947; oscillations and multi-unit firing associated with sharp-waves is already recognized (Sullivan et al., 2011), and variability of the ripple power can be related with different cortical subnetworks (Abadchi et al., 2020; RamirezVillegas et al., 2015).&#8221; (page 16).</p><disp-quote content-type="editor-comment"><p>&#8211; In Figure 5 the authors claim that they find "striking differences in firing rates and timings of SWRs detected at SO, SR and SLM", however, from the example plots in Figure 5H it is clear that except SO, all other strata follow a similar timing, with bot SO and to some extent SLM showing some misalignment in time.</p></disp-quote><p>We apologize for generating confusion. We meant that the analysis was performed by comparing properties of SWR detected at SO, SR and SLM using z- values scored by SWR detected at SP only. We clarified this point in the revised version (page 14).</p><disp-quote content-type="editor-comment"><p>How confident are the authors about this variability which turn out to be significant in Figure 5H is not related to the fact that at the two sides of the dipole of the pyramidal cell layer (SO and SLM) more noise can be detected due to larger events fluctuations that not necessarily are ripple events? In other words, the events detected at SO and SLM could contain a higher percentage of false positives? Alternatively, could the variability be related to the occurrence (and detection) of similar events in neighboring spectral bands (i.e., &#947; events)? 
The authors should discuss this point in the text.</p></disp-quote><p>As we showed above there is no differences of False Positive detections between SO, SR and SLM layers. Regarding the potential effect of background activity, we now discuss this point:</p><p>&#8220;While we cannot discard noisy detection from a continuum of LFP activity, our categorization suggest they may reflect processes underlying buildup of population events (de la Prida et al., 2006). In addition, the ability of CA3 inputs to bring about &#947; oscillations and multi-unit firing associated with sharp-waves is already recognized (Sullivan et al., 2011), and variability of the ripple power can be related with different cortical subnetworks (Abadchi et al., 2020; RamirezVillegas et al., 2015).&#8221; (Page 16)</p><disp-quote content-type="editor-comment"><p>Overall, I think the method is interesting and could be very useful to detect more nuance within hippocampal LFPs and offer new insights into the underlying mechanisms of hippocampal firing and how they organize in various forms of network events related to memory. Nonetheless, I suggest clarifying the above points for better interpretability as it will also clarify the context of how the method is being validated and where it could be applied in the future.</p></disp-quote><p>Thank you for constructive comments</p><disp-quote content-type="editor-comment"><p>Reviewer #2 (Recommendations for the authors):</p><p>The key points that are required to convincingly support the claims of the paper are:</p></disp-quote><p>We thank the reviewer for providing this checklist. Please, see below.</p><disp-quote content-type="editor-comment"><p>1. Comparing the CNN to a filter approach that has access to the same number of channels and can also learn the relative weight of each.</p></disp-quote><p>We have examined the filter when applied to the same number of channels (8-channels), which did not improve performance. 
We also optimized the filter parameters using the 2 training sessions, similar to the CNN. Our data still support the CNN detection capability better than the filter. Please, note that training an algorithm to learn the weight of each filter it will be an ANN itself and this is an entirely new project.</p><disp-quote content-type="editor-comment"><p>2. Showing that the CNN significantly outperforms such a model in detecting SWRs.</p></disp-quote><p>Here, we have provided evidence supporting that the CNN significantly outperforms the filter using: (a) offline validation in datasets not used for training (mice recorded head-fixed); (b) online validation in new experiments; (c) an independent dataset tagged by two experts; (d) an external data set from different preparation and species (freely moving rats) to illustrate the effect of the definition of the ground truth (SWR without population firing were not originally annotated in the dataset); (e) new dataset of Neuropixels recordings from head-fixed mice. In addition, in response to the reviewers we showed: (a) that using 8-channels and a consensus optimization did not increased performance of the filter; (b) preliminary results from coarse-density 16ch recorded (100 &#181;m inter-elecrode distance), analyzed by the CNN using interpolation strategies, confirmed the ability of the trained model to generalize.</p><disp-quote content-type="editor-comment"><p>3. Convincingly demonstrating that the model can be applied to new datasets with good performance and reasonable overhead.</p></disp-quote><p>Please, note that in the original version we do apply the model to different new datasets without raining. See response above (point 2).</p><disp-quote content-type="editor-comment"><p>4. 
Showing that the CNN can identify real, biologically relevant aspects of SWRs that a filter cannot.</p></disp-quote><p>We have validated all detections by both the filter and the CNN to show that the network clearly identify more biologically relevant events such as SW-no ripple and SWR with population firing. Also, we provide comparisons of the features of TP events detected by both methods, to support that SWR detected by the CNN exhibited features more similar to those of the ground truth than those detected by the filter.</p></body></sub-article></article>