All Topics

Find Answers
Ask questions. Get answers. Find technical product solutions from passionate members of the Splunk community.

All Topics

i'm using table_row_expansion.js file whose source code is written below:  please give me your input here. require([ 'splunkjs/mvc/tableview', 'splunkjs/mvc/chartview', 'splunkjs/mvc/dataview', ... See more...
i'm using table_row_expansion.js file whose source code is written below:  please give me your input here. require([ 'splunkjs/mvc/tableview', 'splunkjs/mvc/chartview', 'splunkjs/mvc/dataview', 'splunkjs/mvc/searchmanager', 'splunkjs/mvc/postprocessmanager', 'splunkjs/mvc', 'underscore', 'splunkjs/mvc/simplexml/ready!'],function( TableView, ChartView, DataView, SearchManager, PostProcessManager, mvc, _ ){ var EventSearchBasedRowExpansionRenderer = TableView.BaseRowExpansionRenderer.extend({ initialize: function(args) { // initialize will run once, so we will set up a search and a chart to be reused. this._searchManager = new SearchManager({ id: 'details-search-manager', preview: false }); this._baseSearchManager = new SearchManager({ id: 'base-search-manager', search: '| loadjob savedsearch="debie2scmi:duerr_it_vuln_management:saved-plugin_outputv2"', preview: false }); this._postproc_pluginOutput = new PostProcessManager({ id: 'postproc-plugin-output', managerid: 'base-search-manager', }); this._tableView = new TableView({ managerid: 'details-search-manager', 'charting.legend.placement': 'none' }); this._plugintableView = new TableView({ managerid: 'postproc-plugin-output', 'charting.legend.placement': 'none' }); //this._dataView = new DataView({ // managerid: 'details-search-manager', // template: "<b>Solution:</b> <p><%= results[0].solution %></p>" //}); }, canRender: function(rowData) { // Since more than one row expansion renderer can be registered we let each decide if they can handle that // data // Here we will always handle it. return true; }, render: function($container, rowData) { // rowData contains information about the row that is expanded. 
We can see the cells, fields, and values // We will find the sourcetype cell to use its value var findingCell = _(rowData.cells).find(function (cell) { return cell.field === 'finding'; }); // get plugin and ip var result = findingCell.value.split("#"); //update the searches this._searchManager.set({ search: '| inputlookup nessus_plugin_solution.csv | search id=' + result[1] + ' | fields solution | append [| makeresults | eval solution="No data available." | fields - _time ] | head 1'}); // the rex is a workaround for splunk not implementing linebreaks in the details tables correctly, only mv fields seem to work this._postproc_pluginOutput.set({search: '| search pokey="' + result[3] + '" | fields plugin_output | rex mode=sed field=plugin_output "s/(\\n)/\\1 #BREAK#/g" | makemv delim="#BREAK#" plugin_output | append [| makeresults | eval plugin_output="No data available." | fields - _time ] | head 1' }); // $container is the jquery object where we can put out content. // In this case we will render our chart and add it to the $container $container.append(this._tableView.render().el); $container.append(this._plugintableView.render().el); // $container.append("IP: " + result[0] + " Plugin: " + result[1] ); } }); var CustomLinkRenderer = TableView.BaseCellRenderer.extend({ canRender: function(cell) { return cell.field === 'solved'; }, render: function($td, cell) { var solved = cell.value; var solved_result = solved.split("-"); //var a = $('<a>').attr("href", "www.test.de").text("testlink"); //var a = $('<input>').attr('type','checkbox'); //var a = $('<div>').attr({"id":"chk-sourcetype"+cell.value,"value":cell.value}).attr('class','icon-minus-circle'); var a = $('<div>').attr({"id":"chk-sourcetype"+cell.value,"value":cell.value}); // check if marked as solved if(solved_result[1] == 1) { a.attr('class','icon-check-circle'); } else { a.attr('class','icon-minus-circle'); } $td.empty().append(a); a.click(function(e) { e.preventDefault(); //window.location = 
$(e.currentTarget).attr('href'); // or for popup: // window.open($(e.currentTarget).attr('href')); if($(e.currentTarget).attr('class') == 'icon-minus-circle') { $(e.currentTarget).attr('class','icon-gear'); var updatestring = '| inputlookup lkp-all-findings | eval key=_key | where key="' + solved_result[2] +'" | eval lastchecked=1 | outputlookup append=t lkp-all-findings'; var kvupdate = new SearchManager({ preview: false }); kvupdate.set({search: updatestring}); kvupdate.on('search:done', function(properties) { //console.log("DONE!\nSearch job properties:", properties.content); // Set new value when search is done $(e.currentTarget).attr('class','icon-check-circle'); }); } else { $(e.currentTarget).attr('class','icon-gear'); var updatestring = '| inputlookup lkp-all-findings | eval key=_key | where key="' + solved_result[2] +'" | eval lastchecked=0 | outputlookup append=t lkp-all-findings'; var kvupdate = new SearchManager({ preview: false }); kvupdate.set({search: updatestring}); kvupdate.on('search:done', function(properties) { //console.log("DONE!\nSearch job properties:", properties.content); // Set new value when search is done $(e.currentTarget).attr('class','icon-minus-circle'); }); } }); } }); var tableElement = mvc.Components.getInstance("reports_table"); tableElement.getVisualization(function(tableView) { // Add custom cell renderer, the table will re-render automatically. tableView.addRowExpansionRenderer(new EventSearchBasedRowExpansionRenderer()); tableView.table.addCellRenderer(new CustomLinkRenderer()); // Force the table to re-render tableView.table.render(); }); }); @splunk @Anonymous @techiesid @kamlesh_vaghela  please help.
index=105261-cli sourcetype=show_system_resources | dedup deviceId | eval nexus_percent_used=round(100*memory_used/memory_total) | eval nexus_status=if(nexus_percent_used>85, "Not OK", "OK") |... See more...
index=105261-cli sourcetype=show_system_resources | dedup deviceId | eval nexus_percent_used=round(100*memory_used/memory_total) | eval nexus_status=if(nexus_percent_used>85, "Not OK", "OK") | fields deviceId, nexus_percent_used, nexus_status | append [ search index=105261-cli sourcetype=show_memory_statistics | dedup deviceId | eval ios_percent_used=round(100*used/total) | eval ios_status=if(ios_percent_used>85, "Not OK", "OK") | fields deviceId, ios_percent_used, ios_status ] | join deviceId [ search index=105261-np sourcetype=device_details | fields deviceId, productFamily, swVersion, deviceName ] | eval percent_used=if(like(productFamily, "%Nexus%"), nexus_percent_used, ios_percent_used) | eval status=if(like(productFamily, "%Nexus%"), nexus_status, ios_status) | table deviceName, productFamily, swVersion, percent_used, status | sort -percent_used
Hi All! I need your help! After checking that we're receiving logs into splunk mgt, I wanted to do Configuration in splunk. Actually I used to do this without running into the issue, but today I ha... See more...
Hi All! I need your help! After checking that we're receiving logs into splunk mgt, I wanted to do Configuration in splunk. Actually I used to do this without running into the issue, but today I have run into the issue after configuration where I was seeing an error saying: 4 errors occurred while the search was executing. Therefore, search results might be incomplete. Hide errors . [indexer1] Events might not be returned in sub-second order due to search memory limits. See search.log for more information. Increase the value of the following limits.conf setting:[search]:max_rawsize_perchunk. . [indexer3] Events might not be returned in sub-second order due to search memory limits. See search.log for more information. Increase the value of the following limits.conf setting:[search]:max_rawsize_perchunk. . [indexer4] Events might not be returned in sub-second order due to search memory limits. See search.log for more information. Increase the value of the following limits.conf setting:[search]:max_rawsize_perchunk. . [indexer5] Events might not be returned in sub-second order due to search memory limits. See search.log for more information. Increase the value of the following limits.conf setting:[search]:max_rawsize_perchunk.   Kindly help me on how I can fix this and what is the root cause of this error? Thank you in advance!!
I was almost tempted to not report this, because I've been unable to repeat it in a small test case. Some background: I've developed a dashboard in Splunk 8.1.0 that uses the fieldformat command wit... See more...
I was almost tempted to not report this, because I've been unable to repeat it in a small test case. Some background: I've developed a dashboard in Splunk 8.1.0 that uses the fieldformat command with the tostring function to format a duration in a Single Value visualization. Here's a standalone query (with the exception of the field name, the last stage is identical to my dashboard code): | makeresults | eval seconds=12345 | fieldformat seconds=tostring(round(seconds,1),"duration") This displays the expected value in the Single Value viz: 03:25:45.0 So far, so good. However, when a colleague running Splunk 8.0.2 created a dashboard using exactly the same Simple XML, their viz showed the original field value 12345; apparently, ignoring the fieldformat command. I asked my colleague to edit their dashboard source, and change fieldformat to eval. That worked. But then, attempting to create a minimal test case for this forum, the self-contained query cited above works okay in Splunk 8.0.2. Both in the Search & Reporting app, just dynamically selecting a Single Value viz, and also when creating a new dashboard with this query behind a Single Value viz. I'm left scratching my head. Having switched to using eval instead of fieldformat in my dashboard (because I'm not sure of the Splunk version that the intended recipient of my dashboard will be running), I'm moving on. Still, I thought it was worth at least documenting this issue, in case anyone else has the same issue, or knows what's going on here. Aside: Is there a way to format inline code here? I switched to the "Source code" view and manually applied <code> elements, but now the site complains "Your post has been changed because invalid HTML was found in the message body. The invalid HTML has been removed".
I want to create a dropdown in my dashboard wherein I can select the month from the current year and it should give me the results based on the events generated in that particular month only. How ... See more...
I want to create a dropdown in my dashboard wherein I can select the month from the current year and it should give me the results based on the events generated in that particular month only. How can I do that? And how to pass the token value into my tables in the dashboard?
Similar to the Regex to find a directory in a path question, how does one find the full directory path to a file (e.g. like the "dirname" command in Linux)? For example in Windows Security Event lo... See more...
Similar to the Regex to find a directory in a path question, how does one find the full directory path to a file (e.g. like the "dirname" command in Linux)? For example in Windows Security Event logs, Event ID 4688 for New Process Creation has the field "New_Process_Name" which is the full path to the executable... how does one get the full directory of the executable?
I have shown the queries I made with set diff and eval below. My aim is to compare the report of 07:00 to 07:00 of the day before at 07:00 every day and the report of that day at 07:00 to 07:00 and t... See more...
I have shown the queries I made with set diff and eval below. My aim is to compare the report of 07:00 to 07:00 of the day before at 07:00 every day and the report of that day at 07:00 to 07:00 and to post the difference. Example: To compare the search result from 30 November 07:00 to 1 December 07:00 and the search result from 1 December 07:00 to 2 December 07:00 and send the difference.   | set diff [ search NOERROR 10.217.154.253 OR 10.154.216.57 OR 10.194.41.30 earliest="-3d@d" latest="-2d@d" NOT EVRHSGSN1 NOT "*EPDG*" "*MNC001.MCC286*" "*tac-*" | fields "Query", "View", "Response_1", "Response_2", "Response_3", "Response_4", "Response_5", "Response_6", "Response_7", "Response_8", "Response_9", "Query_Type" | fields - _raw | dedup "Query" "View" | sort "Query"] [ search NOERROR 10.217.154.253 OR 10.154.216.57 OR 10.194.41.30 earliest="-2d@d" latest="-d@d" NOT EVRHSGSN1 NOT "*EPDG*" "*MNC001.MCC286*" "*tac-*" | fields "Query", "View", "Response_1", "Response_2", "Response_3", "Response_4", "Response_5", "Response_6", "Response_7", "Response_8", "Response_9", "Query_Type" | fields - _raw | dedup "Query" "View" | sort "Query"]     ----------------- with eval:         index= "syslog" NOERROR 10.217.154.253 OR 10.154.216.57 OR 10.194.41.30 earliest="11/29/2020:00:00:00" latest="11/29/2020:23:59:59" NOT EVRHSGSN1 NOT "*EPDG*" "*MNC001.MCC286*" "*tac-*" | fields "Query", "View", "Response_1", "Response_2", "Response_3", "Response_4", "Response_5", "Response_6", "Response_7", "Response_8", "Response_9", "Query_Type" | fields - _raw | dedup "Query" "View" | sort "Query" | eval ReportKey=bugun | append [search index= "syslog" NOERROR 10.217.154.253 OR 10.154.216.57 OR 10.194.41.30 earliest="11/28/2020:00:00:00" latest="11/28/2020:23:59:59" NOT EVRHSGSN1 NOT "*EPDG*" "*MNC001.MCC286*" "*tac-*" | fields "Query", "View", "Response_1", "Response_2", "Response_3", "Response_4", "Response_5", "Response_6", "Response_7", "Response_8", "Response_9", "Query_Type" | fields - _raw 
| dedup "Query" "View" | sort "Query" | eval ReportKey=dun] index= "syslog" NOERROR 10.217.154.253 OR 10.154.216.57 OR 10.194.41.30 earliest="11/29/2020:00:00:00" latest="11/29/2020:23:59:59" NOT EVRHSGSN1 NOT "*EPDG*" "*MNC001.MCC286*" "*tac-*" | fields "Query", "View", "Response_1", "Response_2", "Response_3", "Response_4", "Response_5", "Response_6", "Response_7", "Response_8", "Response_9", "Query_Type" | fields - _raw | dedup "Query" "View" | sort "Query" | eval ReportKey=bugun | append [search index= "syslog" NOERROR 10.217.154.253 OR 10.154.216.57 OR 10.194.41.30 earliest="11/28/2020:00:00:00" latest="11/28/2020:23:59:59" NOT EVRHSGSN1 NOT "*EPDG*" "*MNC001.MCC286*" "*tac-*" | fields "Query", "View", "Response_1", "Response_2", "Response_3", "Response_4", "Response_5", "Response_6", "Response_7", "Response_8", "Response_9", "Query_Type" | fields - _raw | dedup "Query" "View" | sort "Query" | eval ReportKey=dun]
Hi All, We are ingesting huge volume of logs from fluentd to splunk via HEC method. Will there be any loss as huge volumes are ingested (5GB) per day? If yes, how to rectify it? Is there any alter... See more...
Hi All, We are ingesting huge volume of logs from fluentd to splunk via HEC method. Will there be any loss as huge volumes are ingested (5GB) per day? If yes, how to rectify it? Is there any alternate method to ingest fluentd logs?
index=105261-cli sourcetype=show_processes_cpu pid=0 | dedup deviceId | fields deviceId, idle, fiveMinutes | eval cpuLoad = round(if(isnull(fiveMinutes), 100-idle, fiveMinutes)) | join deviceId ... See more...
index=105261-cli sourcetype=show_processes_cpu pid=0 | dedup deviceId | fields deviceId, idle, fiveMinutes | eval cpuLoad = round(if(isnull(fiveMinutes), 100-idle, fiveMinutes)) | join deviceId [ search index=105261-np sourcetype=device_details | fields deviceId, deviceName, productFamily, swVersion ] | eval Status=if((cpuLoad <85 ), "OK" , "Not OK" ) | table deviceName, productFamily, cpuLoad, swVersion, Status | sort - cpuLoad
Hi All,   Is it possible to send alert to users who have specific role? I am asking if it is possible to send alert based on roles. Saying this is the alert result. In the result, there is role ... See more...
Hi All,   Is it possible to send alert to users who have specific role? I am asking if it is possible to send alert based on roles. Saying this is the alert result. In the result, there is role information(column3) column1 column2 column3  AA             BB              users           ===> I would like to send this result to only users who have user role AAA          BBB           admin          ===> I would like to send this result to only users who have admin role   Is it possible? When a user who has power role logs in the Splunk web, the user should not see the alert. Your help would be appreciated.  
When you call SPL through the API, you occasionally get an inaccurate result from the execution.Query statistics return a smaller result value.   Looking at the SPL execution audit log, no executio... See more...
When you call SPL through the API, you occasionally get an inaccurate result from the execution.Query statistics return a smaller result value.   Looking at the SPL execution audit log, no execution process exceptions were found.   #spl # index=trade_successful_events trade_code IN( 6234,2205,2206,2207,2208,2502) earliest=@d|dedup trade_number | stats count | append [search index=hnb_bdp sourcetype=bdp_cbp_zjl TRAN_TIME=* earliest=@d|stats count] |stats sum(count) as count
Hi, Recently my company changed the email's domain name. I´m in the process of updating all the email addresses here and there and when splunk.com turn arrived I could not update the email address i... See more...
Hi, Recently my company changed the email's domain name. I´m in the process of updating all the email addresses here and there and when splunk.com turn arrived I could not update the email address in my profile. I do have access to the account, and was able to change my name and password, but could not manage to find where or how to update my email address! , it seems non-editable. I'm confused I cannot complete such a simple task. Unfortunately, it is not an option to go back to the previous email address (the easiest solution.) Thank you Miguel
Hi all, I am trying to create a correlation search query for "data exfiltration via email" using email datamodel the rule description is:  "Sending of multiple emails from a single user(outgoing em... See more...
Hi all, I am trying to create a correlation search query for "data exfiltration via email" using email datamodel the rule description is:  "Sending of multiple emails from a single user(outgoing email from a company employee) with total size exceeding threshold of 100MB within 30 minutes" my current correlation search is tstats summariesonly=true sum(All_Email.file_size) as file_size, dc(All_Email.recipient_count) as recipient_count FROM datamodel=Email.All_Email WHERE NOT All_Email.src_category="servers" BY "All_Email.src_user" _time | `drop_dm_object_name("All_Email")` | rex field=email "@(?<domain>.*)" | eval direction=if(in(domain, "xxx.com.au", "yyy.com.au", "aaa.com.au"), "outbound", "inbound") | eval file_size=round(file_size/(100000000)) | where file_size>100 and part of the result is as you can see the src_user values show "unknown" and I only see the inbound emails. Please share your opinion which you think it could enhance the correlation search. Thanks
My Splunk Add-On for Microsoft Office 365 only pulled in the last 7 days of data but in O365 I see older data.  How can I ingest older data?
If you read the title, you are going "well of course it does", but hear me out.   (This will be a long explanation that will hopefully answer the immediate questions)... Background: We have some ... See more...
If you read the title, you are going "well of course it does", but hear me out.   (This will be a long explanation that will hopefully answer the immediate questions)... Background: We have some on-prem UFs that forward "everything"  to our on-prem enterprise indexers AND specific logs  to our splunk cloud instance indexer.    In case you are wondering,  the cloud instance is where our customer can look at their data without needing access to our internal systems. Problem: Splunk did some maintenance on our cloud instance and when they did so, forwarding  from the UFs also stopped coming into our on-prem Splunk.    I can't figure out why cloud being down would stop the forwarders from sending to enterprise.     Checking the documentation here: https://docs.splunk.com/Documentation/Splunk/8.1.0/Forwarding/Setuploadbalancingd#Configure_universal_forwarder_load_balancing_for_horizontal_scaling It reads like the UFs should switch to the next indexers when it goes down.  But it didn't.  Instead we saw this in the internal logs when the cloud instance was taken down for maintenance  11-25-2020 21:59:48.139 -0600 WARN TcpOutputProc - The TCP output processor has paused the data flow. Forwarding to output group splunkcloud has been blocked for 1200 seconds. This will probably stall the data flow towards indexing and other network outputs. Review the receiving system's health in the Splunk Monitoring Console. It is probably not accepting data.   
Looking at the inputs.conf and outputs.conf,  I can see nothing wrong with them to have the data blocked from these UFs Sanitized inputs.conf, with the log that gets sent to both the on-prem instance  (PP_indexers) and cloud instance  bolded  [monitor://C:\blahblahblah\q2.log] _TCP_ROUTING = pp_indexers index = fsd sourcetype = q2 [monitor://C:\blahblahblah\wrapper.log] _TCP_ROUTING = pp_indexers index = fsd_sandbox sourcetype = wrapper [monitor://C:\blahblahblah\metrics.log] _TCP_ROUTING = pp_indexers,splunkcloud index = fsd_sandbox sourcetype = metrics Sanitized outputs.conf:  defaultGroup = pp_indexers forceTimebasedAutoLB = true autoLBFrequency = 15 [tcpout:pp_indexers] server = indexer1.ip.address.here:9997, indexer2.ip.address.here:9997 [tcpout:splunkcloud] compressed = false disabled = false server = our_domain_name.cloud.splunk.com:9997 sslCommonNameToCheck = our_domain_name.cloud.splunk.com sslCertPath = $SPLUNK_HOME/etc/apps/sanitized/client.pem sslPassword = sanitized sslRootCAPath = $SPLUNK_HOME/etc/apps/sanitized/cacert.pem sslVerifyServerCert = true useACK = true Oh and just in case you need it... UF versions are 7.1.2 and 7.2.3 enterprise version is 7.3.4,  cloud is 7.3.
I have created a dashboard that is monitoring the number of events received at corporate to the number of events reported on a client's database.   This way we can be alerted when we have not receive... See more...
I have created a dashboard that is monitoring the number of events received at corporate to the number of events reported on a client's database.   This way we can be alerted when we have not received all events and can log into the client's database and manually pull that file.    Here's a partial screenshot of the current dashboard: Here is the search that produces the above dashboard:   | makeresults | fields - _time | eval eventName="CustCkIn,CustCkOut,EduAssigned,EduView,FTGChannelOff,NavHdiInteractive,PatientInfo,SncChromecastState,StayIdCrossReference,SurveyResponse,SysConfigRoomInfo," | makemv delim="," eventName | mvexpand eventName | append [| inputlookup EventValidationMonitoring.csv | addinfo | eval _time=strptime(date,"%Y-%m-%d") | where _time>=info_min_time AND _time<relative_time('info_min_time',"+604800") AND propertyId=0123456 | eval ValidationData=mvappend("nVerify=".nVerifyEvents,"nProperty=".nPropertyEvents,"nReceived=".nReceivedEvents,"Missing=".Missing) | table date eventName ValidationData | eval {date}=ValidationData | fields - date ValidationData ] | stats values(*) as * by eventName | fillnull value="Missing=Undetermined, nProperty=NA, nReceived=0, nVerify=0"   Is there a way I can format the fillnull string so that it has the same format as the other cells?
Hey folks,    I just installed the Duo Splunk Connector (v1.1.7) on a heavy forwarder running Splunk Enterprise v7.2.4.2.  The docs on Duo's site instructed me to install on an *indexer*, which isn... See more...
Hey folks,    I just installed the Duo Splunk Connector (v1.1.7) on a heavy forwarder running Splunk Enterprise v7.2.4.2.  The docs on Duo's site instructed me to install on an *indexer*, which isn't going to happen, I think they may need to update the docs a bit.     The app installs fine, and I followed the setup to add my integration key, my secret key, the API host, etc.  The only advanced option I changed was the index to send events to (and I *did* change the macro to the same value), I left everything else the same.    I'm getting *some* data - the overview page now shows values for Total Users, Telephony Credits, New Enrollments, and Bypass Codes, but the rest of the page remains "No results found."  All panels on the "Duo Authentication" view show "No results found."    So, searching for events from today shows three events with eventtype=account, and that's it.  Hmmmm, I know that it may take some time to pull events from their API, but I've let it sit for a bit and still only had the three events.    
Looking in the splunkd.log on that HF, I find a number of Python errors periodically from the app: 12-02-2020 22:00:53.588 +0000 INFO ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" Running script 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" Traceback (most recent call last): 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" File "/opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py", line 382, in <module> 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" run_script() 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" File "/opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py", line 368, in run_script 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" log.run() 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" File "/opt/splunk/etc/apps/duo_splunkapp/bin/logclasses/paginated_base_log.py", line 56, in run 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" self.update_mintime_from_timestamp(last_timestamp_file_path) 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" File "/opt/splunk/etc/apps/duo_splunkapp/bin/logclasses/BaseLog.py", line 105, in update_mintime_from_timestamp 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python /opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" extracted_ts = int(f.read().strip()) 12-02-2020 22:00:53.739 +0000 ERROR ExecProcessor - message from "python 
/opt/splunk/etc/apps/duo_splunkapp/bin/duo_input.py" ValueError: invalid literal for int() with base 10: ''    Hmmm.  I'm unsure if this is what's preventing more data from coming in, or if I'm just not waiting long enough for the app to do its job.  I'd expect to see more events after 30 minutes, but I still only have the three and the errors appear each time the input attempts to run (every 120 seconds I believe).     Has anyone else run into this with this version of the app?  I need to get this data indexed for InfoSec and compliance reasons, but I'm hoping someone with some deeper knowledge knows what the issue is and can lend a hand before I start debugging Python. Thanks so much! Chris
Here is a sample of the search, can anyone help?  The query works and returns data but errors out on the output filename creation.   | dbxquery query="SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COL... See more...
Here is a sample of the search, can anyone help?  The query works and returns data but errors out on the output filename creation.   | dbxquery query="SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = '$field1$' AND TABLE_NAME = '$field2$';" connection="123456" | table COLUMN_NAME| sort +COLUMN_NAME| outputcsv $field1$+$field2$.csv   Thanks in advance Rob
Hello,  I am working with a Linux system and a universal forwarder.    Operating System: Debian GNU/Linux 10 (buster) Kernel: Linux 4.19.0-12-amd64 Architecture: x86-64     wh... See more...
Hello,  I am working with a Linux system and a universal forwarder.    Operating System: Debian GNU/Linux 10 (buster) Kernel: Linux 4.19.0-12-amd64 Architecture: x86-64     when I checked opt/splunkforwarder/etc/system/local  and ran ls -l I noticed that root root had permission in there as well as splunk splunk. Should splunk splunk own everything in the universal forwarder directory?   -rw-r--r-- 1 root root 283 Apr 30 2020 inputs.conf -rw------- 1 root root 45 Apr 21 2020 migration.conf -rw-r--r-- 1 root root 222 Apr 23 2020 outputs.conf -r--r--r-- 1 splunk splunk 265 Mar 30 2020 README -rw------- 1 splunk splunk 431 Sep 23 2019 server.conf -rw-r--r-- 1 splunk splunk 65 Jun 3 13:38 user-seed.conf -rw-r--r-- 1 root root 40 Sep 23 2019 web.conf    
When I try and install an app (e.g. Cloudflare App for Splunk) I am prompted for my username and password. I provided that, but it says I have given a bad username or password. I've tried multiple ti... See more...
When I try and install an app (e.g. Cloudflare App for Splunk) I am prompted for my username and password. I provided that, but it says I have given a bad username or password. I've tried multiple times, as well as logged out and back in, of course using the same credentials, so I'm pretty sure I am providing the correct username and password.  Is the problem the account doesn't have the appropriate permissions to add an app? If so, what permission does it need? I'm logged in as the "sc_admin" account. I'm using a free cloud account I just started, in case that matters.