<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: Long running searches in Splunk Search</title>
    <link>https://community.splunk.com/t5/Splunk-Search/Long-running-searches/m-p/529476#M149540</link>
    <description>&lt;P&gt;There's a known issue in this version: a deadlock can occur, causing the symptoms you are seeing of long-running searches with the never-ending messages you found.&lt;BR /&gt;&lt;BR /&gt;&lt;EM&gt;09-28-2020 14:52:53.906 INFO DispatchExecutor - User applied action=CANCEL while status=3&lt;/EM&gt;&lt;BR /&gt;&lt;EM&gt;09-28-2020 14:52:54.906 INFO ReducePhaseExecutor - ReducePhaseExecutor=1 action=CANCEL&lt;/EM&gt;&lt;BR /&gt;&lt;BR /&gt;This has been fixed in 8.0.2.1 and 8.0.3. Until you upgrade to a fixed version, you can also apply the work-around below, which may have a minor impact on search performance.&lt;/P&gt;&lt;P&gt;** work-around: in limits.conf on all search heads (SH)&lt;BR /&gt;[search]&lt;BR /&gt;remote_timeline = 0&lt;/P&gt;&lt;P&gt;** fixed versions: 8.0.2.1 and 8.0.3+&lt;/P&gt;</description>
    <pubDate>Sun, 15 Nov 2020 23:48:19 GMT</pubDate>
    <dc:creator>sylim_splunk</dc:creator>
    <dc:date>2020-11-15T23:48:19Z</dc:date>
    <item>
      <title>Long running searches</title>
      <link>https://community.splunk.com/t5/Splunk-Search/Long-running-searches/m-p/529475#M149539</link>
      <description>&lt;P&gt;On all search head cluster members running version 8.0.2, we observe CPU utilization growing every day; after roughly two days the CPU load graph looks like it is climbing steadily.&lt;/P&gt;&lt;P&gt;Our analysis found that several queries are "zombied" and Splunk no longer appears to control them.&lt;BR /&gt;These processes run endlessly at the operating-system level, consuming more and more CPU over time.&lt;/P&gt;&lt;P&gt;In the UI there is a message that the search was "auto-canceled".&lt;/P&gt;&lt;P&gt;At the end of search.log for such a process we always see:&lt;/P&gt;&lt;TABLE border="1" width="100%"&gt;&lt;TBODY&gt;&lt;TR&gt;&lt;TD width="100%"&gt;09-28-2020 14:52:57.907 INFO ReducePhaseExecutor - ReducePhaseExecutor=1 action=CANCEL&lt;BR /&gt;09-28-2020 14:52:57.907 INFO DispatchExecutor - User applied action=CANCEL while status=3&lt;BR /&gt;09-28-2020 14:52:58.906 INFO ReducePhaseExecutor - ReducePhaseExecutor=1 action=CANCEL&lt;BR /&gt;09-28-2020 14:52:58.906 INFO DispatchExecutor - User applied action=CANCEL while status=3&lt;BR /&gt;09-28-2020 14:52:59.906 INFO ReducePhaseExecutor - ReducePhaseExecutor=1 action=CANCEL&lt;/TD&gt;&lt;/TR&gt;&lt;/TBODY&gt;&lt;/TABLE&gt;&lt;P&gt;Please help.&lt;/P&gt;</description>
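      <!--
        A minimal sketch of one way to spot such zombied jobs from SPL on a search head,
        assuming the standard "rest" command against the /services/search/jobs endpoint;
        the 600-second threshold is an arbitrary illustration, not a value from the post.

        | rest /services/search/jobs splunk_server=local
        | search dispatchState!=DONE
        | where runDuration > 600
        | table sid author dispatchState runDuration
      -->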
      <pubDate>Sun, 15 Nov 2020 23:45:47 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Splunk-Search/Long-running-searches/m-p/529475#M149539</guid>
      <dc:creator>sylim_splunk</dc:creator>
      <dc:date>2020-11-15T23:45:47Z</dc:date>
    </item>
    <item>
      <title>Re: Long running searches</title>
      <link>https://community.splunk.com/t5/Splunk-Search/Long-running-searches/m-p/529476#M149540</link>
      <description>&lt;P&gt;There's a known issue in this version: a deadlock can occur, causing the symptoms you are seeing of long-running searches with the never-ending messages you found.&lt;BR /&gt;&lt;BR /&gt;&lt;EM&gt;09-28-2020 14:52:53.906 INFO DispatchExecutor - User applied action=CANCEL while status=3&lt;/EM&gt;&lt;BR /&gt;&lt;EM&gt;09-28-2020 14:52:54.906 INFO ReducePhaseExecutor - ReducePhaseExecutor=1 action=CANCEL&lt;/EM&gt;&lt;BR /&gt;&lt;BR /&gt;This has been fixed in 8.0.2.1 and 8.0.3. Until you upgrade to a fixed version, you can also apply the work-around below, which may have a minor impact on search performance.&lt;/P&gt;&lt;P&gt;** work-around: in limits.conf on all search heads (SH)&lt;BR /&gt;[search]&lt;BR /&gt;remote_timeline = 0&lt;/P&gt;&lt;P&gt;** fixed versions: 8.0.2.1 and 8.0.3+&lt;/P&gt;</description>
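      <!--
        A sketch of the work-around above as it would appear on disk, assuming the stanza is
        placed in $SPLUNK_HOME/etc/system/local/limits.conf on every search head; the file
        location, and restarting splunkd so the change takes effect, are assumptions based on
        how limits.conf settings are usually applied, not details stated in the post.

        [search]
        # Disable fetching remote timeline data from search peers (work-around for the
        # cancel deadlock fixed in 8.0.2.1 and 8.0.3; may slightly affect search performance).
        remote_timeline = 0
      -->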
      <pubDate>Sun, 15 Nov 2020 23:48:19 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Splunk-Search/Long-running-searches/m-p/529476#M149540</guid>
      <dc:creator>sylim_splunk</dc:creator>
      <dc:date>2020-11-15T23:48:19Z</dc:date>
    </item>
    <item>
      <title>Re: Long running searches</title>
      <link>https://community.splunk.com/t5/Splunk-Search/Long-running-searches/m-p/574203#M200108</link>
      <description>&lt;P&gt;It's happening to me on version 8.0.3 right now.&lt;/P&gt;</description>
      <pubDate>Tue, 09 Nov 2021 14:16:33 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Splunk-Search/Long-running-searches/m-p/574203#M200108</guid>
      <dc:creator>esalesap</dc:creator>
      <dc:date>2021-11-09T14:16:33Z</dc:date>
    </item>
  </channel>
</rss>

