<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: Splunk stopped ingesting in Getting Data In</title>
    <link>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642469#M109487</link>
    <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/224632"&gt;@Roy_9&lt;/a&gt;,&lt;/P&gt;&lt;P&gt;probably the performance tests gave an overload on resources so your hardware hasn't the necessary resources to read and forward logs.&lt;/P&gt;&lt;P&gt;You can test queues using this search:&lt;/P&gt;&lt;LI-CODE lang="markup"&gt;index=_internal  source=*metrics.log sourcetype=splunkd group=queue host=&amp;lt;your_host&amp;gt;
| eval name=case(name=="aggqueue","2 - Aggregation Queue",
 name=="indexqueue", "4 - Indexing Queue",
 name=="parsingqueue", "1 - Parsing Queue",
 name=="typingqueue", "3 - Typing Queue",
 name=="splunktcpin", "0 - TCP In Queue",
 name=="tcpin_cooked_pqueue", "0 - TCP In Queue") 
| eval max=if(isnotnull(max_size_kb),max_size_kb,max_size) 
| eval curr=if(isnotnull(current_size_kb),current_size_kb,current_size) 
| eval fill_perc=round((curr/max)*100,2) 
| bin _time span=1m
| stats Median(fill_perc) AS "fill_percentage" max(max) AS max max(curr) AS curr by host, _time, name 
| where (fill_percentage&amp;gt;70 AND name!="4 - Indexing Queue") OR (fill_percentage&amp;gt;70 AND name="4 - Indexing Queue")
| sort -_time&lt;/LI-CODE&gt;&lt;P&gt;If you'll find queues at 100%, you'll have found the reason of the stop.&lt;/P&gt;&lt;P&gt;Ciao.&lt;/P&gt;&lt;P&gt;Giuseppe&lt;/P&gt;</description>
    <pubDate>Sat, 06 May 2023 10:49:46 GMT</pubDate>
    <dc:creator>gcusello</dc:creator>
    <dc:date>2023-05-06T10:49:46Z</dc:date>
    <item>
      <title>Why did Splunk stop ingesting?</title>
      <link>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642459#M109484</link>
      <description>&lt;P&gt;Hello,&lt;/P&gt;
&lt;P&gt;I have a usecase where few servers stopped ingesting for 3-4 hrs when the user is doing performance testing on those servers and then servers started ingesting again automatically, I am not sure what caused the ingestion to stop.&lt;/P&gt;
&lt;P&gt;During the time when the ingestion has stopped, logs are still available on the server&lt;/P&gt;
&lt;P&gt;Please help me with the troubleshooting on what might have caused for this issue and how I can remediate this?&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;
&lt;P&gt;Thanks in advance&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Mon, 08 May 2023 10:03:33 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642459#M109484</guid>
      <dc:creator>Roy_9</dc:creator>
      <dc:date>2023-05-08T10:03:33Z</dc:date>
    </item>
    <item>
      <title>Re: Splunk stopped ingesting</title>
      <link>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642469#M109487</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/224632"&gt;@Roy_9&lt;/a&gt;,&lt;/P&gt;&lt;P&gt;probably the performance tests gave an overload on resources so your hardware hasn't the necessary resources to read and forward logs.&lt;/P&gt;&lt;P&gt;You can test queues using this search:&lt;/P&gt;&lt;LI-CODE lang="markup"&gt;index=_internal  source=*metrics.log sourcetype=splunkd group=queue host=&amp;lt;your_host&amp;gt;
| eval name=case(name=="aggqueue","2 - Aggregation Queue",
 name=="indexqueue", "4 - Indexing Queue",
 name=="parsingqueue", "1 - Parsing Queue",
 name=="typingqueue", "3 - Typing Queue",
 name=="splunktcpin", "0 - TCP In Queue",
 name=="tcpin_cooked_pqueue", "0 - TCP In Queue") 
| eval max=if(isnotnull(max_size_kb),max_size_kb,max_size) 
| eval curr=if(isnotnull(current_size_kb),current_size_kb,current_size) 
| eval fill_perc=round((curr/max)*100,2) 
| bin _time span=1m
| stats Median(fill_perc) AS "fill_percentage" max(max) AS max max(curr) AS curr by host, _time, name 
| where (fill_percentage&amp;gt;70 AND name!="4 - Indexing Queue") OR (fill_percentage&amp;gt;70 AND name="4 - Indexing Queue")
| sort -_time&lt;/LI-CODE&gt;&lt;P&gt;If you'll find queues at 100%, you'll have found the reason of the stop.&lt;/P&gt;&lt;P&gt;Ciao.&lt;/P&gt;&lt;P&gt;Giuseppe&lt;/P&gt;</description>
      <pubDate>Sat, 06 May 2023 10:49:46 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642469#M109487</guid>
      <dc:creator>gcusello</dc:creator>
      <dc:date>2023-05-06T10:49:46Z</dc:date>
    </item>
    <item>
      <title>Re: Splunk stopped ingesting</title>
      <link>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642546#M109517</link>
      <description>&lt;P&gt;Hello&amp;nbsp;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/161352"&gt;@gcusello&lt;/a&gt;&amp;nbsp;&lt;BR /&gt;&lt;BR /&gt;The parsing queues fill percentages are less than 70% when the testing was run, I am not sure on what other factors that are causing the issue.Do yo have any thoughts?&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Thanks&lt;/P&gt;</description>
      <pubDate>Mon, 08 May 2023 15:45:07 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642546#M109517</guid>
      <dc:creator>Roy_9</dc:creator>
      <dc:date>2023-05-08T15:45:07Z</dc:date>
    </item>
    <item>
      <title>Re: Splunk stopped ingesting</title>
      <link>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642547#M109518</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/224632"&gt;@Roy_9&lt;/a&gt;,&lt;/P&gt;&lt;P&gt;I can only suppose that the testing activities have a higher priority than Splunk activities, so they have to wait for the end of the other activities.&lt;/P&gt;&lt;P&gt;Ciao.&lt;/P&gt;&lt;P&gt;Giuseppe&lt;/P&gt;</description>
      <pubDate>Mon, 08 May 2023 16:10:37 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Getting-Data-In/Why-did-Splunk-stop-ingesting/m-p/642547#M109518</guid>
      <dc:creator>gcusello</dc:creator>
      <dc:date>2023-05-08T16:10:37Z</dc:date>
    </item>
  </channel>
</rss>

