feat: Disable job recording from plenary.

This should reduce memory usage, at least for large jobs, since we no
longer save all of the results into the job. I _think_ this will
roughly halve memory usage in many situations.
Author: TJ DeVries
Date: 2020-09-21 15:39:40 -04:00
parent 2ca006fc97
commit 25b7895097
2 changed files with 6 additions and 2 deletions

lua/telescope/finders.lua

@@ -132,6 +132,8 @@ function JobFinder:_find(prompt, process_result, process_complete)
     writer = writer,
 
+    enable_recording = false,
+
     on_stdout = on_output,
     on_stderr = on_output,
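
For context, plenary.nvim's Job records every stdout/stderr line into an in-memory results table by default, which is what job:result() later returns; enable_recording = false turns that bookkeeping off so lines only flow through the callbacks. A minimal sketch of the difference, assuming plenary.job's options as of this commit (the rg invocation is just an illustration):

local Job = require "plenary.job"

-- Default behaviour: the job accumulates every output line in
-- memory so that job:result() can return them after the run.
local recorded = Job:new {
  command = "rg",
  args = { "--files" },
}
recorded:sync()
print(#recorded:result()) -- every line was stored on the job

-- With recording disabled, each line is handed to the callback
-- and never accumulated on the job object.
local unrecorded = Job:new {
  command = "rg",
  args = { "--files" },
  enable_recording = false,
  on_stdout = function(_, line)
    -- process `line` immediately; nothing is retained
  end,
}
unrecorded:sync()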

lua/telescope/pickers.lua

@@ -660,7 +660,8 @@ pickers.entry_manager = function(max_results, set_entry, info)
   log.debug("Creating entry_manager...")
 
   info = info or {}
-  info.items_looped = 0
+  info.looped = 0
+  info.inserted = 0
 
   -- state contains list of
   -- {
@@ -683,7 +684,7 @@ pickers.entry_manager = function(max_results, set_entry, info)
     end
 
     for index, item in ipairs(entry_state) do
-      info.items_looped = info.items_looped + 1
+      info.looped = info.looped + 1
 
       if item.score > score then
        return self:insert(index, {
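
The looped counter tracks how many existing entries the manager scans before finding the sorted position for a new result. A simplified, self-contained sketch of that scan (hypothetical data; the real entry_state items carry more fields than score and entry):

local info = { looped = 0, inserted = 0 }
local entry_state = {
  { score = 10, entry = "best" },
  { score = 25, entry = "good" },
  { score = 40, entry = "worse" },
}

-- Walk the sorted entries until one scores worse than the new
-- result, counting every comparison in info.looped.
local function find_insert_index(score)
  for index, item in ipairs(entry_state) do
    info.looped = info.looped + 1
    if item.score > score then
      return index
    end
  end
  return #entry_state + 1
end

print(find_insert_index(30)) -- 3 (lower scores sort first here)
print(info.looped)           -- 3 comparisons were needed
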
@@ -714,6 +715,7 @@ pickers.entry_manager = function(max_results, set_entry, info)
     -- and then shift all the corresponding items one place.
     local next_entry, last_score
     repeat
+      info.inserted = info.inserted + 1
       next_entry = entry_state[index]
       set_entry(index, entry.entry)
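
The inserted counter measures the other half of the cost: placing an entry at its sorted position shifts every later entry down one slot, and the counter grows by one per touched slot. Continuing the simplified sketch above (the real code also tracks last_score and calls set_entry to keep the displayed buffer in sync):

-- Place new_item at `index` and push the remaining entries down,
-- counting each touched slot in info.inserted.
local function insert_at(index, new_item)
  local next_entry
  repeat
    info.inserted = info.inserted + 1
    next_entry = entry_state[index]
    entry_state[index] = new_item
    new_item = next_entry
    index = index + 1
  until next_entry == nil
end

insert_at(find_insert_index(30), { score = 30, entry = "okay" })
print(info.inserted) -- 2 slots were touched while shifting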