<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: splunk in All Apps and Add-ons</title>
    <link>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686652#M80587</link>
    <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/264563"&gt;@shakti&lt;/a&gt;,&lt;/P&gt;&lt;P&gt;use faster disks if you're using a physical server or dedicated resources if you're using a virtual server and possible SSD disks.&lt;/P&gt;&lt;P&gt;Ciao.&lt;/P&gt;&lt;P&gt;Giuseppe&lt;/P&gt;</description>
    <pubDate>Tue, 07 May 2024 07:07:44 GMT</pubDate>
    <dc:creator>gcusello</dc:creator>
    <dc:date>2024-05-07T07:07:44Z</dc:date>
    <item>
      <title>splunk</title>
      <link>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686511#M80578</link>
      <description>&lt;P&gt;Hello ,&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;The Forwarder ingestion latency is showing red on my search head....&lt;/P&gt;&lt;UL&gt;&lt;LI&gt;&lt;STRONG&gt;Root Cause(s): &lt;/STRONG&gt;&lt;UL&gt;&lt;LI&gt;&lt;SPAN&gt;Indicator 'ingestion_latency_gap_multiplier' exceeded configured value. The observed value is 5474815. Message from 452CE67F-3C57-403C-B7B1-E34754172C83:10.250.2.7:3535&lt;/SPAN&gt;&lt;/LI&gt;&lt;/UL&gt;&lt;/LI&gt;&lt;/UL&gt;&lt;P&gt;&lt;SPAN&gt;Can anyone please provide any suggestions?&lt;/SPAN&gt;&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Mon, 06 May 2024 06:55:15 GMT</pubDate>
      <guid>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686511#M80578</guid>
      <dc:creator>shakti</dc:creator>
      <dc:date>2024-05-06T06:55:15Z</dc:date>
    </item>
    <item>
      <title>Re: splunk</title>
      <link>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686538#M80579</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/264563"&gt;@shakti&lt;/a&gt;&amp;nbsp;,&lt;/P&gt;&lt;P&gt;there's a delay between the event timestamp and the indexing timestamp probably caused by the too high data volume.&lt;/P&gt;&lt;P&gt;This could be caused by a queue issue on the Forwarder, by a network latency or by a resource problem (usually storage performance) on your Indexers.&lt;/P&gt;&lt;P&gt;You can check queues using a search like the following&amp;nbsp;&lt;/P&gt;&lt;LI-CODE lang="markup"&gt;index=_internal  source=*metrics.log sourcetype=splunkd group=queue 
| eval name=case(name=="aggqueue","2 - Aggregation Queue",
 name=="indexqueue", "4 - Indexing Queue",
 name=="parsingqueue", "1 - Parsing Queue",
 name=="typingqueue", "3 - Typing Queue",
 name=="splunktcpin", "0 - TCP In Queue",
 name=="tcpin_cooked_pqueue", "0 - TCP In Queue") 
| eval max=if(isnotnull(max_size_kb),max_size_kb,max_size) 
| eval curr=if(isnotnull(current_size_kb),current_size_kb,current_size) 
| eval fill_perc=round((curr/max)*100,2) 
| bin _time span=1m
| stats Median(fill_perc) AS "fill_percentage" perc90(fill_perc) AS "90_perc" max(max) AS max max(curr) AS curr by host, _time, name 
| where (fill_percentage&amp;gt;70 AND name!="4 - Indexing Queue") OR (fill_percentage&amp;gt;70 AND name="4 - Indexing Queue")
| sort -_time&lt;/LI-CODE&gt;&lt;P&gt;About resources, did you check the IOPS of your storage?&lt;/P&gt;&lt;P&gt;Do you have the correct number of CPUs?&lt;/P&gt;&lt;P&gt;Lastly, does your network have sufficient bandwidth to support your data volume?&lt;/P&gt;&lt;P&gt;Ciao.&lt;/P&gt;&lt;P&gt;Giuseppe&lt;/P&gt;</description>
      <pubDate>Mon, 06 May 2024 09:02:53 GMT</pubDate>
      <guid>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686538#M80579</guid>
      <dc:creator>gcusello</dc:creator>
      <dc:date>2024-05-06T09:02:53Z</dc:date>
    </item>
    <item>
      <title>Re: splunk</title>
      <link>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686539#M80580</link>
      <description>&lt;P&gt;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/161352"&gt;@gcusello&lt;/a&gt;&amp;nbsp; Appreciate your reply....&lt;/P&gt;&lt;P&gt;we have indexer clustering environment . However for both indexers and search head we are using only 4 CPU physical cores ..Do&amp;nbsp; you think that can cause this problem?&lt;/P&gt;</description>
      <pubDate>Mon, 06 May 2024 09:07:13 GMT</pubDate>
      <guid>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686539#M80580</guid>
      <dc:creator>shakti</dc:creator>
      <dc:date>2024-05-06T09:07:13Z</dc:date>
    </item>
    <item>
      <title>Re: splunk</title>
      <link>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686543#M80581</link>
      <description>&lt;P&gt;Also, if I may know, what should be the good I/O operations for splunk?&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Mon, 06 May 2024 09:55:33 GMT</pubDate>
      <guid>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686543#M80581</guid>
      <dc:creator>shakti</dc:creator>
      <dc:date>2024-05-06T09:55:33Z</dc:date>
    </item>
    <item>
      <title>Re: splunk</title>
      <link>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686555#M80582</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/264563"&gt;@shakti&lt;/a&gt;,&lt;/P&gt;&lt;P&gt;surely the number of CPUs is one of the root causes of your issue, because Splunk requires at least 12 CPUs for Indexers and 16 if you also have ES.&lt;/P&gt;&lt;P&gt;Anyway, check the IOPS (using a tool e.g. like Bonnie++ or FIO), because this is the usual major issue in queue problems.&lt;/P&gt;&lt;P&gt;Ciao.&lt;/P&gt;&lt;P&gt;Giuseppe&lt;/P&gt;</description>
      <pubDate>Mon, 06 May 2024 10:34:03 GMT</pubDate>
      <guid>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686555#M80582</guid>
      <dc:creator>gcusello</dc:creator>
      <dc:date>2024-05-06T10:34:03Z</dc:date>
    </item>
    <item>
      <title>Re: splunk</title>
      <link>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686651#M80586</link>
      <description>&lt;P&gt;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/161352"&gt;@gcusello&lt;/a&gt;&amp;nbsp; &amp;nbsp;Thank you for your reply&amp;nbsp;&lt;/P&gt;&lt;P&gt;The IOPS of indexers and search heads is between 50 -300 ...I guess its pretty less&amp;nbsp; ...May I know do you have any suggestions how to improve on it?&lt;/P&gt;</description>
      <pubDate>Tue, 07 May 2024 07:02:34 GMT</pubDate>
      <guid>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686651#M80586</guid>
      <dc:creator>shakti</dc:creator>
      <dc:date>2024-05-07T07:02:34Z</dc:date>
    </item>
    <item>
      <title>Re: splunk</title>
      <link>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686652#M80587</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.splunk.com/t5/user/viewprofilepage/user-id/264563"&gt;@shakti&lt;/a&gt;,&lt;/P&gt;&lt;P&gt;use faster disks if you're using a physical server or dedicated resources if you're using a virtual server and possible SSD disks.&lt;/P&gt;&lt;P&gt;Ciao.&lt;/P&gt;&lt;P&gt;Giuseppe&lt;/P&gt;</description>
      <pubDate>Tue, 07 May 2024 07:07:44 GMT</pubDate>
      <guid>https://community.splunk.com/t5/All-Apps-and-Add-ons/splunk/m-p/686652#M80587</guid>
      <dc:creator>gcusello</dc:creator>
      <dc:date>2024-05-07T07:07:44Z</dc:date>
    </item>
  </channel>
</rss>

