#include <packager/file/threaded_io_file.h>

#include <functional>

#include <absl/log/check.h>
#include <absl/log/log.h>

#include <packager/file/thread_pool.h>

namespace shaka {
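// ThreadedIoFile decouples callers from the wrapped File by doing all real
// I/O on a ThreadPool task: Read()/Write() only touch the in-memory IoCache,
// while TaskHandler() fills (input mode) or drains (output mode) that cache
// against internal_file_ in the background.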
ThreadedIoFile::ThreadedIoFile(std::unique_ptr<File, FileCloser> internal_file,
                               Mode mode,
                               uint64_t io_cache_size,
                               uint64_t io_block_size)
    : File(internal_file->file_name()),
      internal_file_(std::move(internal_file)),
      mode_(mode),
      cache_(io_cache_size),
      io_buffer_(io_block_size),
      position_(0),
      size_(0),
      eof_(false),
      internal_file_error_(0),
      flushing_(false),
      flush_complete_(false),
      task_exited_(false) {
  DCHECK(internal_file_);
}

ThreadedIoFile::~ThreadedIoFile() {}
bool ThreadedIoFile::Open() {
  DCHECK(internal_file_);

  if (!internal_file_->Open())
    return false;

  position_ = 0;
  size_ = internal_file_->Size();

  // Start the background I/O task on the shared thread pool.
  ThreadPool::instance.PostTask(std::bind(&ThreadedIoFile::TaskHandler, this));

  return true;
}
bool ThreadedIoFile::Close() {
  DCHECK(internal_file_);

  bool result = true;
  if (mode_ == kOutputMode)
    result = Flush();

  // Closing the cache unblocks the background task so it can exit.
  cache_.Close();
  WaitForSignal(&task_exited_mutex_, &task_exited_);

  result &= internal_file_.release()->Close();
  delete this;
  return result;
}
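// Note: per the File interface convention in this codebase, Close() also
// destroys the object itself, which is why internal_file_ is release()d from
// the unique_ptr above: the wrapped file likewise deletes itself on Close().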
int64_t ThreadedIoFile::Read(void* buffer, uint64_t length) {
  DCHECK(internal_file_);
  DCHECK_EQ(kInputMode, mode_);

  if (eof_.load(std::memory_order_relaxed) && !cache_.BytesCached())
    return 0;

  if (internal_file_error_.load(std::memory_order_relaxed))
    return internal_file_error_.load(std::memory_order_relaxed);

  uint64_t bytes_read = cache_.Read(buffer, length);
  position_ += bytes_read;

  return bytes_read;
}
int64_t ThreadedIoFile::Write(const void* buffer, uint64_t length) {
  DCHECK(internal_file_);
  DCHECK_EQ(kOutputMode, mode_);

  if (internal_file_error_.load(std::memory_order_relaxed))
    return internal_file_error_.load(std::memory_order_relaxed);

  uint64_t bytes_written = cache_.Write(buffer, length);
  position_ += bytes_written;
  if (position_ > size_)
    size_ = position_;

  return bytes_written;
}

void ThreadedIoFile::CloseForWriting() {}
int64_t ThreadedIoFile::Size() {
  DCHECK(internal_file_);

  return size_;
}
bool ThreadedIoFile::Flush() {
  DCHECK(internal_file_);
  DCHECK_EQ(kOutputMode, mode_);

  if (internal_file_error_.load(std::memory_order_relaxed))
    return false;

  {
    absl::MutexLock lock(&flush_mutex_);
    flushing_ = true;
    flush_complete_ = false;
  }
  cache_.WaitUntilEmptyOrClosed();

  WaitForSignal(&flush_mutex_, &flush_complete_);

  return internal_file_->Flush();
}
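// Flush() is a two-phase handshake with RunInOutputMode(): setting flushing_
// asks the background task to acknowledge once the cache has drained, and
// WaitForSignal() blocks until the task flips flush_complete_ back to true.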
bool ThreadedIoFile::Seek(uint64_t position) {
  if (mode_ == kOutputMode) {
    // Write mode: drain the cache, then seek the wrapped file directly.
    if (!Flush())
      return false;
    if (!internal_file_->Seek(position))
      return false;
  } else {
    // Read mode: stop the background task, seek, then restart the task.
    cache_.Close();
    WaitForSignal(&task_exited_mutex_, &task_exited_);

    bool result = internal_file_->Seek(position);
    if (!result) {
      // The seek failed; try to restore the old logical position.
      if (!internal_file_->Seek(position_) && (position != position_)) {
        LOG(WARNING) << "Seek failed. ThreadedIoFile left in invalid state.";
      }
    }
    cache_.Reopen();
    ThreadPool::instance.PostTask(
        std::bind(&ThreadedIoFile::TaskHandler, this));

    if (!result)
      return false;
  }
  position_ = position;
  return true;
}
bool ThreadedIoFile::Tell(uint64_t* position) {
  DCHECK(position);

  *position = position_;
  return true;
}
void ThreadedIoFile::TaskHandler() {
  {
    absl::MutexLock lock(&task_exited_mutex_);
    task_exited_ = false;
  }
  if (mode_ == kInputMode)
    RunInInputMode();
  else
    RunInOutputMode();
  {
    absl::MutexLock lock(&task_exited_mutex_);
    task_exited_ = true;
  }
}
void ThreadedIoFile::RunInInputMode() {
  DCHECK(internal_file_);
  DCHECK_EQ(kInputMode, mode_);

  while (true) {
    int64_t read_result =
        internal_file_->Read(&io_buffer_[0], io_buffer_.size());
    if (read_result <= 0) {
      eof_.store(read_result == 0, std::memory_order_relaxed);
      internal_file_error_.store(read_result, std::memory_order_relaxed);
      cache_.Close();
      return;
    }
    if (cache_.Write(&io_buffer_[0], read_result) == 0) {
      // The cache was closed under us (Close() or Seek()); stop reading.
      return;
    }
  }
}
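// The input loop relies on IoCache for backpressure: cache_.Write() blocks
// while the cache is full and returns 0 only once the cache has been closed,
// so the background task never reads further ahead than io_cache_size bytes.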
void ThreadedIoFile::RunInOutputMode() {
  DCHECK(internal_file_);
  DCHECK_EQ(kOutputMode, mode_);

  while (true) {
    uint64_t write_bytes = cache_.Read(&io_buffer_[0], io_buffer_.size());
    if (write_bytes == 0) {
      // The cache drained: either acknowledge a pending flush or exit.
      absl::MutexLock lock(&flush_mutex_);
      if (flushing_) {
        flushing_ = false;
        flush_complete_ = true;
      } else {
        return;
      }
    } else {
      // Loop to handle partial writes from the wrapped file.
      uint64_t bytes_written(0);
      while (bytes_written < write_bytes) {
        int64_t write_result = internal_file_->Write(
            &io_buffer_[bytes_written], write_bytes - bytes_written);
        if (write_result < 0) {
          internal_file_error_.store(write_result, std::memory_order_relaxed);
          cache_.Close();
          // If a flush is pending, unblock it so Flush() can report failure.
          absl::MutexLock lock(&flush_mutex_);
          if (flushing_) {
            flushing_ = false;
            flush_complete_ = true;
          }
          return;
        }
        bytes_written += write_result;
      }
    }
  }
}
void ThreadedIoFile::WaitForSignal(absl::Mutex* mutex, bool* condition) {
  // LockWhen() waits until *condition becomes true, then acquires the mutex;
  // we only need the wait, so release the lock immediately.
  mutex->LockWhen(absl::Condition(condition));
  mutex->Unlock();
}
// All the methods that are virtual are virtual for mocking.

}  // namespace shaka
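// A minimal usage sketch. It assumes a LocalFile subclass with a
// (file_name, mode) constructor and illustrative buffer sizes; treat the
// exact signatures as assumptions, not normative API documentation:
//
//   std::unique_ptr<File, FileCloser> local(new LocalFile("out.bin", "w"));
//   File* file = new ThreadedIoFile(std::move(local),
//                                   ThreadedIoFile::kOutputMode,
//                                   8 * 1024 * 1024 /* io_cache_size */,
//                                   64 * 1024 /* io_block_size */);
//   if (file->Open()) {
//     file->Write("hello", 5);
//     file->Close();  // Flushes, joins the I/O task, and deletes |file|.
//   }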