class Splunk::MultiResultsReader

Parser for the XML results sets returned by blocking export jobs.

The create_export and create_stream methods on Jobs and Service do not return data in quite the same format as other search jobs in Splunk. They return a sequence of preview results sets, followed (unless the search is a real-time search) by a single final results set.

MultiResultsReader takes the stream returned by such a call, and provides iteration over each results set, or access to only the final, non-preview results set.

Examples:

require 'splunk-sdk-ruby'

service = Splunk::connect(:username => "admin", :password => "changeme")

stream = service.jobs.create_export("search index=_internal | head 10")

readers = Splunk::MultiResultsReader.new(stream)
readers.each do |reader|
  puts "New results set (preview=#{reader.is_preview?})"
  reader.each do |result|
    puts result
  end
end

# Alternatively, read only the final, non-preview results set:
reader = readers.final_results()
reader.each do |result|
  puts result
end

Public Class Methods

new(text_or_stream)

Accepts a String of XML or a stream (any object responding to read); a nil argument is treated as an empty stream.

# File lib/splunk-sdk-ruby/resultsreader.rb, line 588
def initialize(text_or_stream)
  if text_or_stream.nil?
    stream = StringIO.new("")
  elsif !text_or_stream.respond_to?(:read)
    # Strip leading and trailing whitespace, which the XML parsers can choke on.
    stream = StringIO.new(text_or_stream.strip)
  else
    stream = text_or_stream
  end

  listener = ResultsListener.new()
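  # The fiber produces, for each results set in the stream: an
  # is_preview flag, then the list of fields, then the individual
  # events. #each below resumes the fiber to read them in that order.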
  @iteration_fiber = Fiber.new do
    if $splunk_xml_library == :nokogiri
      parser = Nokogiri::XML::SAX::Parser.new(listener)
      # Nokogiri requires a unique root element, which we are fabricating
      # here, while REXML is fine with multiple root elements in a stream.
      edited_stream = ConcatenatedStream.new(
          StringIO.new("<fake-root-element>"),
          XMLDTDFilter.new(stream),
          StringIO.new("</fake-root-element>")
      )
      parser.parse(edited_stream)
    else # Use REXML
      REXML::Document.parse_stream(stream, listener)
    end
  end
end
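
To see why the fabricated root element is needed: an export stream contains several results sets, each of which is a root-level element, and Nokogiri refuses to parse a document with more than one root. A minimal standalone sketch of the same trick, using a hypothetical two-set payload rather than the SDK's ConcatenatedStream:

require 'nokogiri'

# Hypothetical export payload: two root-level <results> elements,
# which is not well-formed XML on its own.
multi_root = '<results preview="1"/><results preview="0"/>'

# Wrapping it in a fabricated root, as initialize does, yields a
# single well-formed document.
doc = Nokogiri::XML("<fake-root-element>" + multi_root + "</fake-root-element>")
doc.xpath("//results").each { |r| puts r["preview"] }  # prints 1, then 0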

Public Instance Methods

each() { |reader| ... }

Iterates over the results sets in the stream, yielding a ResultsReader for each; if no block is given, returns an Enumerator over the results sets instead.

# File lib/splunk-sdk-ruby/resultsreader.rb, line 616
def each()
  enum = Enumerator.new() do |yielder|
    if !@iteration_fiber.nil? # Handle the case of empty files
      begin
        while true
          is_preview = @iteration_fiber.resume
          fields = @iteration_fiber.resume
          reader = PuppetResultsReader.new(@iteration_fiber, is_preview, fields)
          yielder << reader
          # Finish extracting any events that the user didn't read.
          # Otherwise the next results reader will start in the middle of
          # the previous results set.
          reader.skip_remaining_results()
          reader.invalidate()
        end
      rescue FiberError
        # After the last result element, the next evaluation of
        # 'is_preview = @iteration_fiber.resume' above will throw a
        # +FiberError+ when the fiber terminates without yielding any
        # additional values. We handle the control flow in this way so
        # that the final code in the fiber to handle cleanup always gets
        # run.
      end
    end
  end

  if block_given? # Apply the enumerator to a block if we have one
    enum.each() { |e| yield e }
  else
    enum # Otherwise return the enumerator itself
  end
end
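
Because each returns an Enumerator when no block is given, it composes with the usual Enumerable methods. A sketch, assuming stream was obtained from create_export as in the class-level example:

readers = Splunk::MultiResultsReader.new(stream)

# Count the results sets in the export. This consumes the stream, so
# the readers cannot be iterated a second time.
puts readers.each.count
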
final_results()

Returns a ResultsReader over only the non-preview results.

If you run this method against a real-time search job, which only ever produces preview results sets, it will loop forever. If you run it against a non-reporting search (that is, one that filters and extracts fields from events but doesn't calculate a whole new set of events), you will get only the first few results, since in that case you should be using a plain ResultsReader rather than a MultiResultsReader.

# File lib/splunk-sdk-ruby/resultsreader.rb, line 659
def final_results()
  each do |reader|
    if reader.is_preview?
      reader.skip_remaining_results()
    else
      return reader
    end
  end
end
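
Since final_results skips every preview results set before returning, the reader it hands back always reports is_preview? as false. A short sketch, again assuming a stream from create_export:

reader = Splunk::MultiResultsReader.new(stream).final_results()
puts reader.is_preview?   # => false
reader.each do |result|
  puts result
end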